1 // SPDX-License-Identifier: MIT
3 * Copyright © 2021 Intel Corporation
8 #include <linux/device/driver.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include <linux/pm_runtime.h>
13 #include <drm/drm_color_mgmt.h>
14 #include <drm/drm_drv.h>
15 #include <drm/xe_pciids.h>
17 #include "regs/xe_regs.h"
18 #include "regs/xe_gt_regs.h"
19 #include "xe_device.h"
20 #include "xe_display.h"
23 #include "xe_macros.h"
24 #include "xe_module.h"
25 #include "xe_pci_types.h"
/*
 * Maps a subplatform enum value to the PCI device IDs that identify it;
 * walked by find_subplatform() against xe->info.devid.
 */
35 struct xe_subplatform_desc {
36 enum xe_subplatform subplatform;
/*
 * Static description of a supported device. A pointer to one of these is
 * stored in each pciidlist entry's driver_data and unpacked into xe->info
 * by xe_info_init().
 */
47 struct xe_device_desc {
48 /* Should only ever be set for platforms without GMD_ID */
49 const struct xe_graphics_desc *graphics;
50 /* Should only ever be set for platforms without GMD_ID */
51 const struct xe_media_desc *media;
53 const char *platform_name;
54 const struct xe_subplatform_desc *subplatforms;
56 enum xe_platform platform;
/* When set, probe is refused unless the ID is in xe_param_force_probe */
58 u8 require_force_probe:1;
66 u8 supports_mmio_ext:1;
/* The tables below intentionally override earlier designated initializers */
70 __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
/* Xe_LP graphics (TGL/RKL/ADL family): render + one copy engine */
78 static const struct xe_graphics_desc graphics_xelp = {
83 .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
/* Xe_LP+ graphics (DG1) */
90 static const struct xe_graphics_desc graphics_xelpp = {
95 .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
/* Feature defaults shared by the Xe_HP-family graphics descriptors below */
102 #define XE_HP_FEATURES \
103 .has_range_tlb_invalidation = true, \
104 .has_flat_ccs = true, \
105 .dma_mask_size = 46, \
109 static const struct xe_graphics_desc graphics_xehpg = {
115 BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
116 BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
117 BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
/* VRAM on this IP must be managed at 64K granularity */
120 .vram_flags = XE_VRAM_FLAGS_NEED64K,
/* Xe_HPC graphics: no render engine; nine copy + four compute engines */
123 static const struct xe_graphics_desc graphics_xehpc = {
129 BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
130 BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
131 BIT(XE_HW_ENGINE_BCS4) | BIT(XE_HW_ENGINE_BCS5) |
132 BIT(XE_HW_ENGINE_BCS6) | BIT(XE_HW_ENGINE_BCS7) |
133 BIT(XE_HW_ENGINE_BCS8) |
134 BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
135 BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
/* One tile beyond the root tile; feeds tile_count in xe_info_init() */
139 .max_remote_tiles = 1,
142 .vram_flags = XE_VRAM_FLAGS_NEED64K,
/* Xe_LPG graphics, selected at runtime via GMD_ID (see graphics_ip_map) */
149 static const struct xe_graphics_desc graphics_xelpg = {
152 BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
153 BIT(XE_HW_ENGINE_CCS0),
/* Feature defaults shared by the Xe2 graphics descriptors below */
159 #define XE2_GFX_FEATURES \
160 .dma_mask_size = 46, \
162 .has_flat_ccs = 0 /* FIXME: implementation missing */, \
163 .has_range_tlb_invalidation = 1, \
164 .supports_usm = 0 /* FIXME: implementation missing */, \
168 BIT(XE_HW_ENGINE_RCS0) | \
169 BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
170 GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
/* Xe2 graphics, selected at runtime via GMD_ID (see graphics_ip_map) */
172 static const struct xe_graphics_desc graphics_xe2 = {
/* Media IP descriptors: Xe media engines for the pre-GMD_ID platforms */
178 static const struct xe_media_desc media_xem = {
184 BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
185 BIT(XE_HW_ENGINE_VECS0),
188 static const struct xe_media_desc media_xehpm = {
194 BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
195 BIT(XE_HW_ENGINE_VECS0) | BIT(XE_HW_ENGINE_VECS1),
/* Xe_LPM+ media, selected at runtime via GMD_ID (see media_ip_map) */
198 static const struct xe_media_desc media_xelpmp = {
201 BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS2) |
202 BIT(XE_HW_ENGINE_VECS0), /* TODO: add GSC0 */
205 static const struct xe_media_desc media_xe2 = {
208 BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VECS0), /* TODO: GSC0 */
/* Tiger Lake: pre-GMD_ID, so graphics IP is bound statically */
211 static const struct xe_device_desc tgl_desc = {
212 .graphics = &graphics_xelp,
214 PLATFORM(XE_TIGERLAKE),
217 .require_force_probe = true,
/* Rocket Lake */
220 static const struct xe_device_desc rkl_desc = {
221 .graphics = &graphics_xelp,
223 PLATFORM(XE_ROCKETLAKE),
226 .require_force_probe = true,
/* 0-terminated RPL-S IDs; used as the ADL-S "RPLS" subplatform list */
229 static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 };
/* Alder Lake S, with Raptor Lake S as a subplatform */
231 static const struct xe_device_desc adl_s_desc = {
232 .graphics = &graphics_xelp,
234 PLATFORM(XE_ALDERLAKE_S),
237 .require_force_probe = true,
238 .subplatforms = (const struct xe_subplatform_desc[]) {
239 { XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
/* 0-terminated RPL-U IDs; used as the ADL-P "RPLU" subplatform list */
244 static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 };
/* Alder Lake P, with Raptor Lake U as a subplatform */
246 static const struct xe_device_desc adl_p_desc = {
247 .graphics = &graphics_xelp,
249 PLATFORM(XE_ALDERLAKE_P),
252 .require_force_probe = true,
253 .subplatforms = (const struct xe_subplatform_desc[]) {
254 { XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
/* Alder Lake N */
259 static const struct xe_device_desc adl_n_desc = {
260 .graphics = &graphics_xelp,
262 PLATFORM(XE_ALDERLAKE_N),
265 .require_force_probe = true,
/* Fields common to all discrete-GPU descriptors */
268 #define DGFX_FEATURES \
271 static const struct xe_device_desc dg1_desc = {
272 .graphics = &graphics_xelpp,
277 .require_force_probe = true,
/* 0-terminated ID lists distinguishing the three DG2/ATS-M dies */
281 static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
282 static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 };
283 static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };
/* Fields shared by the DG2 and ATS-M descriptors */
285 #define DG2_FEATURES \
288 .has_heci_gscfi = 1, \
289 .subplatforms = (const struct xe_subplatform_desc[]) { \
290 { XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \
291 { XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
292 { XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
296 static const struct xe_device_desc ats_m_desc = {
297 .graphics = &graphics_xehpg,
298 .media = &media_xehpm,
299 .require_force_probe = true,
/* ATS-M is a server part: no display */
302 .has_display = false,
305 static const struct xe_device_desc dg2_desc = {
306 .graphics = &graphics_xehpg,
307 .media = &media_xehpm,
308 .require_force_probe = true,
/* __maybe_unused: PVC is not wired into pciidlist below yet */
314 static const __maybe_unused struct xe_device_desc pvc_desc = {
315 .graphics = &graphics_xehpc,
318 .has_display = false,
319 .require_force_probe = true,
/* MTL/LNL leave .graphics/.media NULL so the IP is detected via GMD_ID */
323 static const struct xe_device_desc mtl_desc = {
324 /* .graphics and .media determined via GMD_ID */
325 .require_force_probe = true,
326 PLATFORM(XE_METEORLAKE),
330 static const struct xe_device_desc lnl_desc = {
331 PLATFORM(XE_LUNARLAKE),
332 .require_force_probe = true,
338 /* Map of GMD_ID values to graphics IP */
/* Keys are verx100 (arch * 100 + release) as decoded by peek_gmdid() */
339 static struct gmdid_map graphics_ip_map[] = {
340 { 1270, &graphics_xelpg },
341 { 1271, &graphics_xelpg },
342 { 2004, &graphics_xe2 },
345 /* Map of GMD_ID values to media IP */
346 static struct gmdid_map media_ip_map[] = {
347 { 1300, &media_xelpmp },
348 { 2000, &media_xe2 },
/*
 * PCI match entry: any Intel device with the given ID and a display base
 * class. The device descriptor pointer rides in driver_data and is
 * recovered in xe_pci_probe().
 */
351 #define INTEL_VGA_DEVICE(id, info) { \
352 PCI_DEVICE(PCI_VENDOR_ID_INTEL, id), \
353 PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16, \
354 (unsigned long) info }
 * Make sure any device matches here are ordered from most specific to
 * most general. If a device ID could be matched by more than one entry
 * (for example a subplatform ID list and its parent platform's list),
 * the more specific entry must come first, otherwise we'll use the
 * wrong info struct above.
/* Every supported device ID, each bound to its platform descriptor */
362 static const struct pci_device_id pciidlist[] = {
363 XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
364 XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
365 XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
366 XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
367 XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
368 XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
369 XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
370 XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
371 XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
372 XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
373 XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
374 XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
/* Export the ID table so module autoloading works on device match */
377 MODULE_DEVICE_TABLE(pci, pciidlist);
/* Only needed to build the table above */
379 #undef INTEL_VGA_DEVICE
381 /* is device_id present in comma separated list of ids */
/*
 * With negative=true only "!id" tokens are considered (and "!*" matches
 * everything); with negative=false only bare hex IDs are considered (and
 * "*" matches everything). Tokens of the wrong polarity are skipped.
 */
382 static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
/* NULL or empty list matches nothing */
387 if (!devices || !*devices)
390 /* match everything */
391 if (negative && strcmp(devices, "!*") == 0)
393 if (!negative && strcmp(devices, "*") == 0)
/* Work on a writable copy, since strsep() modifies the string in place */
396 s = kstrdup(devices, GFP_KERNEL);
400 for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
/* Skip tokens whose '!' prefix does not match the requested polarity */
403 if (negative && tok[0] == '!')
405 else if ((negative && tok[0] != '!') ||
406 (!negative && tok[0] == '!'))
/* Device IDs are parsed as hexadecimal */
409 if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
/* True when the user listed this device ID in xe_param_force_probe */
420 static bool id_forced(u16 device_id)
422 return device_id_in_list(device_id, xe_param_force_probe, false);
/* True when the user blocked this device ID via a "!id" force_probe entry */
425 static bool id_blocked(u16 device_id)
427 return device_id_in_list(device_id, xe_param_force_probe, true);
/*
 * Find the subplatform descriptor (if any) whose PCI ID list contains
 * xe->info.devid. desc->subplatforms may be NULL for platforms without
 * subplatforms; the ID lists are 0-terminated.
 */
430 static const struct xe_subplatform_desc *
431 find_subplatform(const struct xe_device *xe, const struct xe_device_desc *desc)
433 const struct xe_subplatform_desc *sp;
436 for (sp = desc->subplatforms; sp && sp->subplatform; sp++)
437 for (id = sp->pciidlist; *id; id++)
438 if (*id == xe->info.devid)
/*
 * Read a GMD_ID register straight from BAR0 through a temporary mapping,
 * before the driver's regular MMIO setup exists, and decode it into a
 * verx100 version number and a revision ID.
 */
444 static void peek_gmdid(struct xe_device *xe, u32 gmdid_offset, u32 *ver, u32 *revid)
446 struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
447 void __iomem *map = pci_iomap_range(pdev, 0, gmdid_offset, sizeof(u32));
451 drm_err(&xe->drm, "Failed to read GMD_ID (%#x) from PCI BAR.\n",
460 pci_iounmap(pdev, map);
/* verx100 encoding: architecture * 100 + release */
462 *ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 +
463 REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
464 *revid = REG_FIELD_GET(GMD_ID_REVID, val);
468 * Pre-GMD_ID platform: device descriptor already points to the appropriate
469 * graphics descriptor. Simply forward the description and calculate the version
470 * appropriately. "graphics" should be present in all such platforms, while
473 static void handle_pre_gmdid(struct xe_device *xe,
474 const struct xe_device_desc *desc,
475 const struct xe_graphics_desc **graphics,
476 const struct xe_media_desc **media)
478 *graphics = desc->graphics;
/* Same verx100 encoding that peek_gmdid() produces on GMD_ID platforms */
479 xe->info.graphics_verx100 = (*graphics)->ver * 100 + (*graphics)->rel;
481 *media = desc->media;
483 xe->info.media_verx100 = (*media)->ver * 100 + (*media)->rel;
488 * GMD_ID platform: read IP version from hardware and select graphics descriptor
489 * based on the result.
491 static void handle_gmdid(struct xe_device *xe,
492 const struct xe_device_desc *desc,
493 const struct xe_graphics_desc **graphics,
494 const struct xe_media_desc **media,
/* Graphics GMD_ID lives at the base GMD_ID register offset */
500 peek_gmdid(xe, GMD_ID.addr, &ver, graphics_revid);
501 for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
502 if (ver == graphics_ip_map[i].ver) {
503 xe->info.graphics_verx100 = ver;
504 *graphics = graphics_ip_map[i].ip;
/* Unknown graphics IP is treated as fatal by the caller, xe_info_init() */
510 if (!xe->info.graphics_verx100) {
511 drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
512 ver / 100, ver % 100);
/* The media GMD_ID register sits 0x380000 above the graphics one */
515 peek_gmdid(xe, GMD_ID.addr + 0x380000, &ver, media_revid);
517 /* Media may legitimately be fused off / not present */
521 for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
522 if (ver == media_ip_map[i].ver) {
523 xe->info.media_verx100 = ver;
524 *media = media_ip_map[i].ip;
/* Unknown media IP: log it, but the caller proceeds without media */
530 if (!xe->info.media_verx100) {
531 drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
532 ver / 100, ver % 100);
/*
 * Translate the static device/subplatform descriptors (plus runtime GMD_ID
 * probing on newer platforms) into xe->info, then allocate the per-tile
 * primary GTs and, where applicable, standalone media GTs.
 * Returns 0 on success or a negative errno.
 */
536 static int xe_info_init(struct xe_device *xe,
537 const struct xe_device_desc *desc,
538 const struct xe_subplatform_desc *subplatform_desc)
540 const struct xe_graphics_desc *graphics_desc = NULL;
541 const struct xe_media_desc *media_desc = NULL;
542 u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
543 struct xe_tile *tile;
547 xe->info.platform = desc->platform;
548 xe->info.subplatform = subplatform_desc ?
549 subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;
552 * If this platform supports GMD_ID, we'll detect the proper IP
553 * descriptor to use from hardware registers. desc->graphics will only
554 * ever be set at this point for platforms before GMD_ID. In that case
555 * the IP descriptions and versions are simply derived from that.
557 if (desc->graphics) {
558 handle_pre_gmdid(xe, desc, &graphics_desc, &media_desc);
559 xe->info.step = xe_step_pre_gmdid_get(xe);
561 handle_gmdid(xe, desc, &graphics_desc, &media_desc,
562 &graphics_gmdid_revid, &media_gmdid_revid);
563 xe->info.step = xe_step_gmdid_get(xe,
564 graphics_gmdid_revid,
569 * If we couldn't detect the graphics IP, that's considered a fatal
570 * error and we should abort driver load. Failing to detect media
571 * IP is non-fatal; we'll just proceed without enabling media support.
/* Copy per-device and per-IP capabilities into the runtime info struct */
576 xe->info.is_dgfx = desc->is_dgfx;
577 xe->info.has_heci_gscfi = desc->has_heci_gscfi;
578 xe->info.graphics_name = graphics_desc->name;
/* media_desc may be NULL here when media is fused off or unknown */
579 xe->info.media_name = media_desc ? media_desc->name : "none";
580 xe->info.has_llc = desc->has_llc;
581 xe->info.has_sriov = desc->has_sriov;
582 xe->info.bypass_mtcfg = desc->bypass_mtcfg;
583 xe->info.supports_mmio_ext = desc->supports_mmio_ext;
584 xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;
586 xe->info.dma_mask_size = graphics_desc->dma_mask_size;
587 xe->info.vram_flags = graphics_desc->vram_flags;
588 xe->info.va_bits = graphics_desc->va_bits;
589 xe->info.vm_max_level = graphics_desc->vm_max_level;
590 xe->info.supports_usm = graphics_desc->supports_usm;
591 xe->info.has_asid = graphics_desc->has_asid;
592 xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
593 xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
/* Display needs the kernel config option in addition to device support */
595 xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
599 * All platforms have at least one primary GT. Any platform with media
600 * version 13 or higher has an additional dedicated media GT. And
601 * depending on the graphics IP there may be additional "remote tiles."
602 * All of these together determine the overall GT count.
604 * FIXME: 'tile_count' here is misnamed since the rest of the driver
605 * treats it as the number of GTs rather than just the number of tiles.
607 xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;
609 for_each_tile(tile, xe, id) {
613 tile->primary_gt = xe_gt_alloc(tile);
614 if (IS_ERR(tile->primary_gt))
615 return PTR_ERR(tile->primary_gt);
617 gt = tile->primary_gt;
618 gt->info.id = xe->info.gt_count++;
619 gt->info.type = XE_GT_TYPE_MAIN;
620 gt->info.__engine_mask = graphics_desc->hw_engine_mask;
/* Before media version 13 the media engines live on the primary GT */
621 if (MEDIA_VER(xe) < 13 && media_desc)
622 gt->info.__engine_mask |= media_desc->hw_engine_mask;
624 if (MEDIA_VER(xe) < 13 || !media_desc)
628 * Allocate and setup media GT for platforms with standalone
631 tile->media_gt = xe_gt_alloc(tile);
632 if (IS_ERR(tile->media_gt))
633 return PTR_ERR(tile->media_gt);
636 gt->info.type = XE_GT_TYPE_MEDIA;
637 gt->info.__engine_mask = media_desc->hw_engine_mask;
/* Media GT register accesses go through an adjusted GSI window */
638 gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
639 gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
642 * FIXME: At the moment multi-tile and standalone media are
643 * mutually exclusive on current platforms. We'll need to
644 * come up with a better way to number GTs if we ever wind
645 * up with platforms that support both together.
647 drm_WARN_ON(&xe->drm, id != 0);
648 gt->info.id = xe->info.gt_count++;
/* PCI .remove() callback: tear down the device created at probe time */
654 static void xe_pci_remove(struct pci_dev *pdev)
656 struct xe_device *xe;
658 xe = pci_get_drvdata(pdev);
659 if (!xe) /* driver load aborted, nothing to cleanup */
662 xe_device_remove(xe);
663 xe_pm_runtime_fini(xe);
/* Drop the drvdata reference now that the device is gone */
664 pci_set_drvdata(pdev, NULL);
/*
 * PCI .probe() callback: enforce the force_probe/block lists, create the
 * xe_device, populate xe->info from the matched descriptor, and hand off
 * to the main device probe. Returns 0 or a negative errno.
 */
667 static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Descriptor pointer stashed in driver_data by INTEL_VGA_DEVICE */
669 const struct xe_device_desc *desc = (const void *)ent->driver_data;
670 const struct xe_subplatform_desc *subplatform_desc;
671 struct xe_device *xe;
/* Unsupported-by-default devices require an explicit force_probe opt-in */
674 if (desc->require_force_probe && !id_forced(pdev->device)) {
676 "Your graphics device %04x is not officially supported\n"
677 "by xe driver in this kernel version. To force Xe probe,\n"
678 "use xe.force_probe='%04x' and i915.force_probe='!%04x'\n"
679 "module parameters or CONFIG_DRM_XE_FORCE_PROBE='%04x' and\n"
680 "CONFIG_DRM_I915_FORCE_PROBE='!%04x' configuration options.\n",
681 pdev->device, pdev->device, pdev->device,
682 pdev->device, pdev->device);
/* A "!id" force_probe entry blocks the device outright */
686 if (id_blocked(pdev->device)) {
687 dev_info(&pdev->dev, "Probe blocked for device [%04x:%04x].\n",
688 pdev->vendor, pdev->device);
692 if (xe_display_driver_probe_defer(pdev))
693 return -EPROBE_DEFER;
695 xe = xe_device_create(pdev, ent);
699 xe_pm_assert_unbounded_bridge(xe);
700 subplatform_desc = find_subplatform(xe, desc);
702 pci_set_drvdata(pdev, xe);
703 err = pci_enable_device(pdev);
707 pci_set_master(pdev);
709 xe_sriov_probe_early(xe, desc->has_sriov);
711 err = xe_info_init(xe, desc, subplatform_desc);
713 goto err_pci_disable;
715 xe_display_probe(xe);
/* Summarize the detected configuration for debugging */
717 drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
719 subplatform_desc ? subplatform_desc->name : "",
720 xe->info.devid, xe->info.revid,
722 xe->info.graphics_name,
723 xe->info.graphics_verx100 / 100,
724 xe->info.graphics_verx100 % 100,
726 xe->info.media_verx100 / 100,
727 xe->info.media_verx100 % 100,
728 str_yes_no(xe->info.enable_display),
729 xe->info.dma_mask_size, xe->info.tile_count,
730 xe->info.has_heci_gscfi);
732 drm_dbg(&xe->drm, "Stepping = (G:%s, M:%s, D:%s, B:%s)\n",
733 xe_step_name(xe->info.step.graphics),
734 xe_step_name(xe->info.step.media),
735 xe_step_name(xe->info.step.display),
736 xe_step_name(xe->info.step.basedie));
738 drm_dbg(&xe->drm, "SR-IOV support: %s (mode: %s)\n",
739 str_yes_no(xe_device_has_sriov(xe)),
740 xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
742 err = xe_device_probe(xe);
744 goto err_pci_disable;
/* Error unwind: undo pci_enable_device() and drop the drm reference */
751 pci_disable_device(pdev);
754 drm_dev_put(&xe->drm);
/* PCI .shutdown() callback: quiesce the device on system shutdown/reboot */
759 static void xe_pci_shutdown(struct pci_dev *pdev)
761 xe_device_shutdown(pdev_to_xe_device(pdev));
764 #ifdef CONFIG_PM_SLEEP
/*
 * Enable or disable D3cold on the PCIe root port above this device.
 * No-op when the device itself is not D3cold-capable.
 */
765 static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
767 struct xe_device *xe = pdev_to_xe_device(pdev);
768 struct pci_dev *root_pdev;
770 if (!xe->d3cold.capable)
773 root_pdev = pcie_find_root_port(pdev);
779 pci_d3cold_disable(root_pdev);
782 pci_d3cold_enable(root_pdev);
/* System sleep: suspend the device, then put the PCI function to sleep */
787 static int xe_pci_suspend(struct device *dev)
789 struct pci_dev *pdev = to_pci_dev(dev);
792 err = xe_pm_suspend(pdev_to_xe_device(pdev));
797 * Enabling D3Cold is needed for S2Idle/S0ix.
798 * It is safe to allow here since xe_pm_suspend has evicted
799 * the local memory and the direct complete optimization is disabled.
801 d3cold_toggle(pdev, D3COLD_ENABLE);
803 pci_save_state(pdev);
804 pci_disable_device(pdev);
/* System resume: restore PCI power and state, then resume the device */
809 static int xe_pci_resume(struct device *dev)
811 struct pci_dev *pdev = to_pci_dev(dev);
814 /* Give back the D3Cold decision to the runtime PM */
815 d3cold_toggle(pdev, D3COLD_DISABLE);
817 err = pci_set_power_state(pdev, PCI_D0);
821 err = pci_enable_device(pdev);
825 pci_set_master(pdev);
827 err = xe_pm_resume(pdev_to_xe_device(pdev));
/* Runtime PM suspend: enter D3cold when allowed, otherwise D3hot */
834 static int xe_pci_runtime_suspend(struct device *dev)
836 struct pci_dev *pdev = to_pci_dev(dev);
837 struct xe_device *xe = pdev_to_xe_device(pdev);
840 err = xe_pm_runtime_suspend(xe);
844 pci_save_state(pdev);
846 if (xe->d3cold.allowed) {
847 d3cold_toggle(pdev, D3COLD_ENABLE);
848 pci_disable_device(pdev);
/* Suppress spurious hotplug events while the device is powered off */
849 pci_ignore_hotplug(pdev);
850 pci_set_power_state(pdev, PCI_D3cold);
852 d3cold_toggle(pdev, D3COLD_DISABLE);
853 pci_set_power_state(pdev, PCI_D3hot);
/* Runtime PM resume: return to D0; re-enable the device if it hit D3cold */
859 static int xe_pci_runtime_resume(struct device *dev)
861 struct pci_dev *pdev = to_pci_dev(dev);
862 struct xe_device *xe = pdev_to_xe_device(pdev);
865 err = pci_set_power_state(pdev, PCI_D0);
869 pci_restore_state(pdev);
/* Only a D3cold exit needs the device re-enabled and bus mastering back */
871 if (xe->d3cold.allowed) {
872 err = pci_enable_device(pdev);
876 pci_set_master(pdev);
879 return xe_pm_runtime_resume(xe);
/* Runtime PM idle: re-evaluate whether D3cold should be allowed */
882 static int xe_pci_runtime_idle(struct device *dev)
884 struct pci_dev *pdev = to_pci_dev(dev);
885 struct xe_device *xe = pdev_to_xe_device(pdev);
887 xe_pm_d3cold_allowed_toggle(xe);
/* System sleep and runtime PM callbacks, wired in below when PM_SLEEP */
892 static const struct dev_pm_ops xe_pm_ops = {
893 SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
894 SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume, xe_pci_runtime_idle)
/* The PCI driver registered/unregistered by the functions below */
898 static struct pci_driver xe_pci_driver = {
900 .id_table = pciidlist,
901 .probe = xe_pci_probe,
902 .remove = xe_pci_remove,
903 .shutdown = xe_pci_shutdown,
904 #ifdef CONFIG_PM_SLEEP
905 .driver.pm = &xe_pm_ops,
/* Register the xe PCI driver with the PCI core; returns 0 or -errno */
909 int xe_register_pci_driver(void)
911 return pci_register_driver(&xe_pci_driver);
/* Unregister the xe PCI driver (module unload path) */
914 void xe_unregister_pci_driver(void)
916 pci_unregister_driver(&xe_pci_driver);
919 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
920 #include "tests/xe_pci.c"