// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define VMCONFIG_MSI_REMAP	0x2
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

#define MB2_SHADOW_OFFSET	0x2000
#define MB2_SHADOW_SIZE		16

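/*
 * When enabled (see MB2_SHADOW_EN), the shadow region at MEMBAR2 + 0x2000
 * holds two consecutive 64-bit registers: the host physical base of MEMBAR1
 * followed by that of MEMBAR2.  vmd_get_phys_offsets() reads both with
 * readq(), which is why MB2_SHADOW_SIZE is 16 bytes.
 */
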
enum vmd_features {
	/*
	 * Device may contain registers which hint the physical location of the
	 * membars, in order to allow proper address translation during
	 * resource assignment to enable guest virtualization
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW		= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS		= (1 << 1),

	/*
	 * Device contains physical location shadow registers in
	 * vendor-specific capability space
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP	= (1 << 2),

	/*
	 * Device may use MSI-X vector 0 for software triggering and will not
	 * be used for MSI remapping
	 */
	VMD_FEAT_OFFSET_FIRST_VECTOR		= (1 << 3),

	/*
	 * Device can bypass remapping MSI-X transactions into its MSI-X table,
	 * avoiding the requirement of a VMD MSI domain for child device
	 * interrupt handling.
	 */
	VMD_FEAT_CAN_BYPASS_MSI_REMAP		= (1 << 4),
};

static DEFINE_IDA(vmd_instance_ida);

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of IRQs the VMD one demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	void __iomem		*cfgbar;

	int			msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;
	u8			busn_start;
	u8			first_vec;
	char			*name;
	int			instance;
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

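/*
 * The index returned above doubles as the MSI destination ID programmed into
 * child devices by vmd_compose_msi_msg(): vector N of the VMD's own MSI-X
 * table handles every child IRQ attached to vmd->irqs[N].
 */
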
/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table.  The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ.  The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	memset(msg, 0, sizeof(*msg));
	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

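/*
 * Writers above serialize on list_lock; the demux handler in vmd_irq() walks
 * irq_list under SRCU instead, so list_del_rcu() plus the synchronize_srcu()
 * in vmd_msi_free() is what keeps a freed vmd_irq out of a running handler.
 */
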
/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	unsigned long flags;
	int i, best;

	if (vmd->msix_count == 1 + vmd->first_vec)
		return &vmd->irqs[vmd->first_vec];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[vmd->first_vec];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	best = vmd->first_vec + 1;
	for (i = best; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

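/*
 * Selection policy, as implemented above: with only one usable vector
 * everything lands on first_vec; NVMe-class devices spread across the
 * remaining vectors by picking the least-loaded one (by count); all other
 * device classes share the "slow" first_vec.
 */
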
static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
{
	u16 reg;

	pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
	reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
		       (reg | VMCONFIG_MSI_REMAP);
	pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
}

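/*
 * Note the inverted sense above: a set VMCONFIG_MSI_REMAP bit *disables*
 * remapping, so enabling remapping means clearing the bit.
 * vmd_enable_domain() only disables remapping when the device advertises
 * VMD_FEAT_CAN_BYPASS_MSI_REMAP and no IOMMU interrupt remapping is active.
 */
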
static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
	if (!vmd->irq_domain) {
		irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	return 0;
}

static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
	/*
	 * Some production BIOS won't enable remapping between soft reboots.
	 * Ensure remapping is restored before unloading the driver.
	 */
	if (!vmd->msix_count)
		vmd_set_msi_remapping(vmd, true);

	if (vmd->irq_domain) {
		struct fwnode_handle *fn = vmd->irq_domain->fwnode;

		irq_domain_remove(vmd->irq_domain);
		irq_domain_free_fwnode(fn);
	}
}

static void __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	unsigned int busnr_ecam = bus->number - vmd->busn_start;
	u32 offset = PCIE_ECAM_OFFSET(busnr_ecam, devfn, reg);

	if (offset + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return vmd->cfgbar + offset;
}

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

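/*
 * These ops route all config access for the VMD-rooted bus through the
 * shadowed ECAM window in CFGBAR; vmd_acpi_find_companion() below also
 * compares bus->ops against &vmd_ops to recognize buses owned by this driver.
 */
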
#ifdef CONFIG_ACPI
static struct acpi_device *vmd_acpi_find_companion(struct pci_dev *pci_dev)
{
	struct pci_host_bridge *bridge;
	u32 busnr, addr;

	if (pci_dev->bus->ops != &vmd_ops)
		return NULL;

	bridge = pci_find_host_bridge(pci_dev->bus);
	busnr = pci_dev->bus->number - bridge->bus->number;

	/*
	 * The address computation below is only applicable to relative bus
	 * numbers below 32.
	 */
	if (busnr > 31)
		return NULL;

	addr = (busnr << 24) | ((u32)pci_dev->devfn << 16) | 0x8000FFFFU;

	dev_dbg(&pci_dev->dev, "Looking for ACPI companion (address 0x%x)\n",
		addr);

	return acpi_find_child_device(ACPI_COMPANION(bridge->dev.parent), addr,
				      false);
}

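/*
 * acpi_pci_find_companion() consults this hook, so devices behind the VMD
 * bridge can still resolve ACPI companions (and the firmware-described
 * properties that come with them) even though they sit in a synthetic PCI
 * domain that platform firmware never enumerated.
 */
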
static bool hook_installed;

static void vmd_acpi_begin(void)
{
	if (pci_acpi_set_companion_lookup_hook(vmd_acpi_find_companion))
		return;

	hook_installed = true;
}

static void vmd_acpi_end(void)
{
	if (!hook_installed)
		return;

	pci_acpi_clear_companion_lookup_hook();
	hook_installed = false;
}
#else
static inline void vmd_acpi_begin(void) { }
static inline void vmd_acpi_end(void) { }
#endif /* CONFIG_ACPI */

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number. Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

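/*
 * Starting the search at 0xffff and returning max + 1 yields 0x10000 for the
 * first VMD instance (assuming no existing domain already exceeds 0xffff),
 * which keeps all VMD domains above the 16-bit _SEG space described above.
 */
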
static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
				resource_size_t *offset1,
				resource_size_t *offset2)
{
	struct pci_dev *dev = vmd->dev;
	u64 phys1, phys2;

	if (native_hint) {
		u32 vmlock;
		int ret;

		ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || vmlock == ~0)
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
			phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
			pci_iounmap(dev, membar2);
		} else
			return 0;
	} else {
		/* Hypervisor-Emulated Vendor-Specific Capability */
		int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
		u32 reg, regu;

		pci_read_config_dword(dev, pos + 4, &reg);

		/* "SHDW" */
		if (pos && reg == 0x53484457) {
			pci_read_config_dword(dev, pos + 8, &reg);
			pci_read_config_dword(dev, pos + 12, &regu);
			phys1 = (u64) regu << 32 | reg;

			pci_read_config_dword(dev, pos + 16, &reg);
			pci_read_config_dword(dev, pos + 20, &regu);
			phys2 = (u64) regu << 32 | reg;
		} else
			return 0;
	}

	*offset1 = dev->resource[VMD_MEMBAR1].start -
			(phys1 & PCI_BASE_ADDRESS_MEM_MASK);
	*offset2 = dev->resource[VMD_MEMBAR2].start -
			(phys2 & PCI_BASE_ADDRESS_MEM_MASK);

	return 0;
}

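/*
 * The offsets computed above translate guest-assigned BAR addresses back to
 * host physical addresses: vmd_enable_domain() feeds them to
 * pci_add_resource_offset(), so they apply to every child BAR in the domain.
 */
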
static int vmd_get_bus_number_start(struct vmd_dev *vmd)
{
	struct pci_dev *dev = vmd->dev;
	u16 reg;

	pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
	if (BUS_RESTRICT_CAP(reg)) {
		pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);

		switch (BUS_RESTRICT_CFG(reg)) {
		case 0:
			vmd->busn_start = 0;
			break;
		case 1:
			vmd->busn_start = 128;
			break;
		case 2:
			vmd->busn_start = 224;
			break;
		default:
			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
				BUS_RESTRICT_CFG(reg));
			return -ENODEV;
		}
	}

	return 0;
}

static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

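/*
 * Each VMD MSI-X vector fans out to every child IRQ on its list: the handler
 * can't know which child actually fired, so it invokes them all.  SRCU keeps
 * the walk safe against a concurrent vmd_msi_free() without taking list_lock
 * in the interrupt path.
 */
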
static int vmd_alloc_irqs(struct vmd_dev *vmd)
{
	struct pci_dev *dev = vmd->dev;
	int i, err;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, vmd->first_vec + 1,
						vmd->msix_count, PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       vmd->name, &vmd->irqs[i]);
		if (err)
			return err;
	}

	return 0;
}

static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000;
	struct pci_bus *child;
	int ret;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
		ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
		if (ret)
			return ret;
	} else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
		ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
		if (ret)
			return ret;
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to between 0-127, 128-255, or 224-255.
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		ret = vmd_get_bus_number_start(vmd);
		if (ret)
			return ret;
	}

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = vmd->busn_start,
		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

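	/*
	 * The CFGBAR resource above is in units of buses: ECAM dedicates
	 * 1MB of config space per bus (32 devices * 8 functions * 4KB),
	 * hence resource_size(res) >> 20 buses starting at busn_start.
	 */
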
	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources. __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource. To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources. We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_dev = vmd->dev;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	/*
	 * Currently MSI remapping must be enabled in guest passthrough mode
	 * due to some missing interrupt remapping plumbing. This is probably
	 * acceptable because the guest is usually CPU-limited and MSI
	 * remapping doesn't become a performance bottleneck.
	 */
	if (iommu_capable(vmd->dev->dev.bus, IOMMU_CAP_INTR_REMAP) ||
	    !(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
	    offset[0] || offset[1]) {
		ret = vmd_alloc_irqs(vmd);
		if (ret)
			return ret;

		vmd_set_msi_remapping(vmd, true);

		ret = vmd_create_irq_domain(vmd);
		if (ret)
			return ret;

		/*
		 * Override the IRQ domain bus token so the domain can be
		 * distinguished from a regular PCI/MSI domain.
		 */
		irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
	} else {
		vmd_set_msi_remapping(vmd, false);
	}

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
				       &vmd_ops, sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		vmd_remove_irq_domain(vmd);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	if (vmd->irq_domain)
		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);

	vmd_acpi_begin();

	pci_scan_child_bus(vmd->bus);
	pci_assign_unassigned_bus_resources(vmd->bus);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	vmd_acpi_end();

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned long features = (unsigned long) id->driver_data;
	struct vmd_dev *vmd;
	int err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL);
	if (vmd->instance < 0)
		return vmd->instance;

	vmd->name = kasprintf(GFP_KERNEL, "vmd%d", vmd->instance);
	if (!vmd->name) {
		err = -ENOMEM;
		goto out_release_instance;
	}

	err = pcim_enable_device(dev);
	if (err < 0)
		goto out_release_instance;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar) {
		err = -ENOMEM;
		goto out_release_instance;
	}

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
		err = -ENODEV;
		goto out_release_instance;
	}

	if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
		vmd->first_vec = 1;

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, features);
	if (err)
		goto out_release_instance;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;

 out_release_instance:
	ida_simple_remove(&vmd_instance_ida, vmd->instance);
	kfree(vmd->name);
	return err;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_detach_resources(vmd);
	vmd_remove_irq_domain(vmd);
	ida_simple_remove(&vmd_instance_ida, vmd->instance);
	kfree(vmd->name);
}

#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       vmd->name, &vmd->irqs[i]);
		if (err)
			return err;
	}

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");