// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
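
/*
 * The driver_data in each match entry above points at the corresponding
 * idxd_driver_data element, so idxd_pci_probe() can parameterize itself
 * (completion record size, alignment, sysfs device type, name prefix)
 * for DSA vs. IAX from a single probe path.
 */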

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
					 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_irq_entries;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
		spin_lock_init(&idxd->irq_entries[i].list_lock);
	}

	idxd_msix_perm_setup(idxd);

	irq_entry = &idxd->irq_entries[0];
	rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
				  0, "idxd-misc", irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = request_threaded_irq(irq_entry->vector, NULL,
					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
			goto err_wq_irqs;
		}

		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
			/*
			 * The MSIX vector enumeration starts at 1 with vector 0 being the
			 * misc interrupt that handles non I/O completion events. The
			 * interrupt handles are for IMS enumeration on guest. The misc
			 * interrupt vector does not require a handle and therefore we start
			 * the int_handles at index 0. Since 'i' starts at 1, the first
			 * int_handles index will be 0.
			 */
			rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
							    IDXD_IRQ_MSIX);
			if (rc < 0) {
				free_irq(irq_entry->vector, irq_entry);
				goto err_wq_irqs;
			}
			dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
		}
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

 err_wq_irqs:
	while (--i >= 0) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
		if (i != 0)
			idxd_device_release_int_handle(idxd,
						       idxd->int_handles[i], IDXD_IRQ_MSIX);
	}
 err_misc_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	idxd_msix_perm_clear(idxd);
 err_irq_entries:
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}
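
/*
 * Teardown counterpart of idxd_setup_interrupts(): free the misc vector
 * first, then each wq vector, handing the interrupt handle back to the
 * device where the RELEASE_INT_HANDLE command is supported.
 */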

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	irq_entry = &idxd->irq_entries[0];
	free_irq(irq_entry->vector, irq_entry);

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
			idxd_device_release_int_handle(idxd, idxd->int_handles[i],
						       IDXD_IRQ_MSIX);
		free_irq(irq_entry->vector, irq_entry);
	}

	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
}
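
/*
 * The wq/engine/group sub-devices created below are embedded struct
 * devices. Once device_initialize() has run, their lifetime is
 * refcounted: error unwinding must go through put_device(), which ends
 * in the device type's release callback, never a bare kfree().
 */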

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		wq->id = i;
		wq->idxd = idxd;
		device_initialize(&wq->conf_dev);
		wq->conf_dev.parent = &idxd->conf_dev;
		wq->conf_dev.bus = &dsa_bus_type;
		wq->conf_dev.type = &idxd_wq_device_type;
		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(&wq->conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->wqs[i]->conf_dev);
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		engine->id = i;
		engine->idxd = idxd;
		device_initialize(&engine->conf_dev);
		engine->conf_dev.parent = &idxd->conf_dev;
		engine->conf_dev.bus = &dsa_bus_type;
		engine->conf_dev.type = &idxd_engine_device_type;
		rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->engines[i]->conf_dev);
	return rc;
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		group->id = i;
		group->idxd = idxd;
		device_initialize(&group->conf_dev);
		group->conf_dev.parent = &idxd->conf_dev;
		group->conf_dev.bus = &dsa_bus_type;
		group->conf_dev.type = &idxd_group_device_type;
		rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		group->tc_a = -1;
		group->tc_b = -1;
	}

	return 0;

 err:
	while (--i >= 0)
		put_device(&idxd->groups[i]->conf_dev);
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);
	for (i = 0; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
		idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL,
						 dev_to_node(dev));
		if (!idxd->int_handles)
			return -ENOMEM;
	}

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);
 err_wqs:
	kfree(idxd->int_handles);
	return rc;
}
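
/*
 * The table offset register reports where the group config, wq config,
 * MSI-X permission, and perfmon tables live in MMIO space; the raw
 * offset fields are scaled by IDXD_TABLE_MULT to get byte offsets from
 * reg_base.
 */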

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
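
/*
 * Capability registers are sampled once at probe time and cached in
 * idxd->hw; idxd_setup_internals() sizes all wq/engine/group
 * allocations from this snapshot rather than re-reading MMIO.
 */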

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->data = data;
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0) {
		kfree(idxd);
		return NULL;
	}

	device_initialize(&idxd->conf_dev);
	idxd->conf_dev.parent = dev;
	idxd->conf_dev.bus = &dsa_bus_type;
	idxd->conf_dev.type = idxd->data->dev_type;
	rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}
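
/*
 * Binding with SVM_FLAG_SUPERVISOR_MODE requests a supervisor PASID
 * against init_mm, giving the device a single system-wide PASID for
 * kernel-mode SVA DMA instead of a per-process address space.
 */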

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (rc == 0) {
			rc = idxd_enable_system_pasid(idxd);
			if (rc < 0) {
				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
			} else {
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
			}
		} else {
			dev_warn(dev, "Unable to turn on SVA feature.\n");
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}
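
/*
 * Unwind everything idxd_probe() set up, in reverse order. Intended for
 * use after a fully successful probe; the probe error paths above do
 * their own partial unwinding.
 */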

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

 err_dev_register:
	idxd_cleanup(idxd);
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(&idxd->conf_dev);
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}
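
/*
 * Descriptors that were submitted but not yet completed at shutdown sit
 * either on a vector's pending llist (not yet harvested by the irq
 * thread) or on its work list. Both flush helpers below complete them
 * with IDXD_COMPLETE_ABORT so dmaengine clients see the abort status.
 */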

static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_release_int_handles(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->num_wq_irqs; i++) {
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
			rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
							    IDXD_IRQ_MSIX);
			if (rc < 0)
				dev_warn(dev, "irq handle %d release failed\n",
					 idxd->int_handles[i]);
			else
				dev_dbg(dev, "int handle released: %u\n",
					idxd->int_handles[i]);
		}
	}
}
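
/*
 * Shutdown order matters: disable the device so no new completions are
 * generated, mask MSI-X and error interrupts, synchronize each vector
 * against in-flight handlers, then abort whatever is still queued on
 * the per-vector lists.
 */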

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(irq_entry->vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_unregister_devices(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
	}
	idxd_msix_perm_clear(idxd);
	idxd_release_int_handles(idxd);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	device_unregister(&idxd->conf_dev);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We cannot utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_unregister_driver();
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
	perfmon_exit();
}
module_exit(idxd_exit_module);