// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signaled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include "internals.h"

static inline int msi_sysfs_create_group(struct device *dev);
#define dev_to_msi_list(dev)	(&(dev)->msi.data->list)

/**
 * msi_alloc_desc - Allocate an initialized msi_desc
 * @dev: Pointer to the device for which this is allocated
 * @nvec: The number of vectors used in this entry
 * @affinity: Optional pointer to an affinity mask array size of @nvec
 *
 * If @affinity is not %NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 *
 * Return: pointer to allocated &msi_desc on success or %NULL on failure
 */
static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
				       const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}
	return desc;
}

static void msi_free_desc(struct msi_desc *desc)
{
	kfree(desc->affinity);
	kfree(desc);
}

/**
 * msi_add_msi_desc - Allocate and initialize a MSI descriptor
 * @dev: Pointer to the device for which the descriptor is allocated
 * @init_desc: Pointer to an MSI descriptor to initialize the new descriptor
 *
 * Return: 0 on success or an appropriate failure code.
 */
int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
	if (!desc)
		return -ENOMEM;

	/* Copy the MSI index and type specific data to the new descriptor. */
	desc->msi_index = init_desc->msi_index;
	desc->pci = init_desc->pci;

	list_add_tail(&desc->list, &dev->msi.data->list);
	return 0;
}

/**
 * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
 * @dev: Pointer to the device for which the descriptors are allocated
 * @index: Index for the first MSI descriptor
 * @ndesc: Number of descriptors to allocate
 *
 * Return: 0 on success or an appropriate failure code.
 */
static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
{
	struct msi_desc *desc, *tmp;
	LIST_HEAD(list);
	unsigned int i;

	lockdep_assert_held(&dev->msi.data->mutex);

	for (i = 0; i < ndesc; i++) {
		desc = msi_alloc_desc(dev, 1, NULL);
		if (!desc)
			goto fail;
		desc->msi_index = index + i;
		list_add_tail(&desc->list, &list);
	}
	list_splice_tail(&list, &dev->msi.data->list);
	return 0;

fail:
	list_for_each_entry_safe(desc, tmp, &list, list) {
		list_del(&desc->list);
		msi_free_desc(desc);
	}
	return -ENOMEM;
}

/**
 * msi_free_msi_descs_range - Free MSI descriptors of a device
 * @dev: Device to free the descriptors
 * @filter: Descriptor state filter
 * @first_index: Index to start freeing from
 * @last_index: Last index to be freed
 */
void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
			      unsigned int first_index, unsigned int last_index)
{
	struct msi_desc *desc;

	lockdep_assert_held(&dev->msi.data->mutex);

	msi_for_each_desc(desc, dev, filter) {
		/*
		 * Stupid for now to handle MSI device domain until the
		 * storage is switched over to an xarray.
		 */
		if (desc->msi_index < first_index || desc->msi_index > last_index)
			continue;
		list_del(&desc->list);
		msi_free_desc(desc);
	}
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

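/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants to inspect the message programmed for one of its interrupts. The
 * irq number, device pointer and debug print are assumptions:
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 *	dev_dbg(dev, "MSI msg: %08x/%08x data %08x\n",
 *		msg.address_hi, msg.address_lo, msg.data);
 */
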
static void msi_device_data_release(struct device *dev, void *res)
{
	struct msi_device_data *md = res;

	WARN_ON_ONCE(!list_empty(&md->list));
	dev->msi.data = NULL;
}

/**
 * msi_setup_device_data - Setup MSI device data
 * @dev: Device for which MSI device data should be set up
 *
 * Return: 0 on success, appropriate error code otherwise
 *
 * This can be called more than once for @dev. If the MSI device data is
 * already allocated the call succeeds. The allocated memory is
 * automatically released when the device is destroyed.
 */
int msi_setup_device_data(struct device *dev)
{
	struct msi_device_data *md;
	int ret;

	if (dev->msi.data)
		return 0;

	md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	ret = msi_sysfs_create_group(dev);
	if (ret) {
		devres_free(md);
		return ret;
	}

	INIT_LIST_HEAD(&md->list);
	mutex_init(&md->mutex);
	dev->msi.data = md;
	devres_add(dev, md);
	return 0;
}

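/*
 * Usage sketch (illustrative only, not part of this file): bus code is
 * expected to set up the per device MSI data before any descriptors are
 * added. The platform driver probe function below is an assumption:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int ret = msi_setup_device_data(&pdev->dev);
 *
 *		if (ret)
 *			return ret;
 *		// MSI descriptor storage and mutex are now usable.
 *		return 0;
 *	}
 */
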
/**
 * msi_lock_descs - Lock the MSI descriptor storage of a device
 * @dev: Device to operate on
 */
void msi_lock_descs(struct device *dev)
{
	mutex_lock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_lock_descs);

/**
 * msi_unlock_descs - Unlock the MSI descriptor storage of a device
 * @dev: Device to operate on
 */
void msi_unlock_descs(struct device *dev)
{
	/* Clear the next pointer which was cached by the iterator */
	dev->msi.data->__next = NULL;
	mutex_unlock(&dev->msi.data->mutex);
}
EXPORT_SYMBOL_GPL(msi_unlock_descs);

static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
{
	switch (filter) {
	case MSI_DESC_ALL:
		return true;
	case MSI_DESC_NOTASSOCIATED:
		return !desc->irq;
	case MSI_DESC_ASSOCIATED:
		return !!desc->irq;
	}
	WARN_ON_ONCE(1);
	return false;
}

static struct msi_desc *msi_find_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	list_for_each_entry(desc, dev_to_msi_list(dev), list) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	return NULL;
}

/**
 * msi_first_desc - Get the first MSI descriptor of a device
 * @dev: Device to operate on
 * @filter: Descriptor state filter
 *
 * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
 * must be invoked before the call.
 *
 * Return: Pointer to the first MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_desc *desc;

	if (WARN_ON_ONCE(!dev->msi.data))
		return NULL;

	lockdep_assert_held(&dev->msi.data->mutex);

	desc = msi_find_first_desc(dev, filter);
	/* Cache the follow-up descriptor for msi_next_desc() */
	dev->msi.data->__next = desc ? list_next_entry(desc, list) : NULL;
	return desc;
}
EXPORT_SYMBOL_GPL(msi_first_desc);

static struct msi_desc *__msi_next_desc(struct device *dev, enum msi_desc_filter filter,
					struct msi_desc *from)
{
	struct msi_desc *desc = from;

	list_for_each_entry_from(desc, dev_to_msi_list(dev), list) {
		if (msi_desc_match(desc, filter))
			return desc;
	}
	return NULL;
}

/**
 * msi_next_desc - Get the next MSI descriptor of a device
 * @dev: Device to operate on
 * @filter: Descriptor state filter
 *
 * The first invocation of msi_next_desc() has to be preceded by a
 * successful invocation of msi_first_desc(). Consecutive invocations are
 * only valid if the previous one was successful. All these operations have
 * to be done within the same MSI mutex held region.
 *
 * Return: Pointer to the next MSI descriptor matching the search
 *	   criteria, NULL if none found.
 */
struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
{
	struct msi_device_data *data = dev->msi.data;
	struct msi_desc *desc;

	if (WARN_ON_ONCE(!data))
		return NULL;

	lockdep_assert_held(&data->mutex);

	if (!data->__next)
		return NULL;

	desc = __msi_next_desc(dev, filter, data->__next);
	data->__next = desc ? list_next_entry(desc, list) : NULL;
	return desc;
}
EXPORT_SYMBOL_GPL(msi_next_desc);

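/*
 * Usage sketch (illustrative only, not part of this file): walking the
 * descriptors of a device under the descriptor mutex. msi_for_each_desc()
 * expands to the msi_first_desc()/msi_next_desc() pair above; the debug
 * print is an assumption:
 *
 *	struct msi_desc *desc;
 *
 *	msi_lock_descs(dev);
 *	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
 *		pr_debug("index %u -> irq %u\n", desc->msi_index, desc->irq);
 *	msi_unlock_descs(dev);
 */
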
/**
 * msi_get_virq - Return Linux interrupt number of a MSI interrupt
 * @dev: Device to operate on
 * @index: MSI interrupt index to look for (0-based)
 *
 * Return: The Linux interrupt number on success (> 0), 0 if not found
 */
unsigned int msi_get_virq(struct device *dev, unsigned int index)
{
	struct msi_desc *desc;
	unsigned int ret = 0;
	bool pcimsi;

	if (!dev->msi.data)
		return 0;

	pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* PCI-MSI has only one descriptor for multiple interrupts. */
		if (pcimsi) {
			if (index < desc->nvec_used)
				ret = desc->irq + index;
			break;
		}

		/*
		 * PCI-MSIX and platform MSI use a descriptor per
		 * interrupt.
		 */
		if (desc->msi_index == index) {
			ret = desc->irq;
			break;
		}
	}
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(msi_get_virq);

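/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * resolving the Linux interrupt number for its first MSI vector and
 * requesting it. The handler, name and cookie are assumptions:
 *
 *	unsigned int virq = msi_get_virq(&pdev->dev, 0);
 *
 *	if (!virq)
 *		return -ENOENT;
 *	ret = request_irq(virq, my_handler, 0, "my-device", priv);
 */
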
#ifdef CONFIG_SYSFS
static struct attribute *msi_dev_attrs[] = {
	NULL
};

static const struct attribute_group msi_irqs_group = {
	.name	= "msi_irqs",
	.attrs	= msi_dev_attrs,
};

static inline int msi_sysfs_create_group(struct device *dev)
{
	return devm_device_add_group(dev, &msi_irqs_group);
}

static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	/* MSI vs. MSIX is per device not per interrupt */
	bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;

	return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
}

static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs = desc->sysfs_attrs;
	int i;

	if (!attrs)
		return;

	desc->sysfs_attrs = NULL;
	for (i = 0; i < desc->nvec_used; i++) {
		if (attrs[i].show)
			sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		kfree(attrs[i].attr.name);
	}
	kfree(attrs);
}

static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
{
	struct device_attribute *attrs;
	int ret, i;

	attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	desc->sysfs_attrs = attrs;
	for (i = 0; i < desc->nvec_used; i++) {
		sysfs_attr_init(&attrs[i].attr);
		attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto fail;
		}

		attrs[i].attr.mode = 0444;
		attrs[i].show = msi_mode_show;

		ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
		if (ret) {
			attrs[i].show = NULL;
			goto fail;
		}
	}
	return 0;

fail:
	msi_sysfs_remove_desc(dev, desc);
	return ret;
}

#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
/**
 * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
 * @dev: The device (PCI, platform etc) which will get sysfs entries
 *
 * Return: 0 on success, error code otherwise
 */
int msi_device_populate_sysfs(struct device *dev)
{
	struct msi_desc *desc;
	int ret;

	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		if (desc->sysfs_attrs)
			continue;
		ret = msi_sysfs_populate_desc(dev, desc);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
 * @dev: The device (PCI, platform etc) for which to remove
 *	 sysfs entries
 */
void msi_device_destroy_sysfs(struct device *dev)
{
	struct msi_desc *desc;

	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		msi_sysfs_remove_desc(dev, desc);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
#else /* CONFIG_SYSFS */
static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
#endif /* !CONFIG_SYSFS */

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data: The irq data associated to the interrupt
 * @mask: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 *
 * Return: IRQ_SET_MASK_* result code
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

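/*
 * Usage sketch (illustrative only, not part of this file): an MSI irq chip
 * built on a hierarchical domain can use msi_domain_set_affinity() as its
 * affinity callback directly. The chip name and write callback are
 * assumptions:
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "MY-MSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_set_affinity	= msi_domain_set_affinity,
 *		.irq_write_msi_msg	= my_write_msi_msg,
 *	};
 */
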
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				for (i--; i > 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq		= msi_domain_ops_get_hwirq,
	.msi_init		= msi_domain_ops_init,
	.msi_check		= msi_domain_ops_check,
	.msi_prepare		= msi_domain_ops_prepare,
	.set_desc		= msi_domain_ops_set_desc,
	.domain_alloc_irqs	= __msi_domain_alloc_irqs,
	.domain_free_irqs	= __msi_domain_free_irqs,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->domain_alloc_irqs == NULL)
		ops->domain_alloc_irqs = msi_domain_ops_default.domain_alloc_irqs;
	if (ops->domain_free_irqs == NULL)
		ops->domain_free_irqs = msi_domain_ops_default.domain_free_irqs;

	if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
		return;

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create an MSI interrupt domain
 * @fwnode: Optional fwnode of the interrupt controller
 * @info: MSI domain info
 * @parent: Parent irq domain
 *
 * Return: pointer to the created &struct irq_domain or %NULL on failure
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}

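/*
 * Usage sketch (illustrative only, not part of this file): creating an MSI
 * domain on top of an existing parent domain and letting the default dom
 * and chip ops fill the gaps. The info contents, fwnode and chip are
 * assumptions:
 *
 *	static struct msi_domain_info my_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	domain = msi_create_irq_domain(fwnode, &my_msi_info, parent);
 *	if (!domain)
 *		return -ENOMEM;
 */
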
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq_base, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret, virq;

	msi_lock_descs(dev);
	for (virq = virq_base; virq < virq_base + nvec; virq++) {
		desc = msi_alloc_desc(dev, 1, NULL);
		if (!desc) {
			ret = -ENOMEM;
			goto fail;
		}

		desc->msi_index = virq;
		desc->irq = virq;
		list_add_tail(&desc->list, &dev->msi.data->list);

		ops->set_desc(arg, desc);
		ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
		if (ret)
			goto fail;

		irq_set_msi_desc(virq, desc);
	}
	msi_unlock_descs(dev);
	return 0;

fail:
	for (--virq; virq >= virq_base; virq--)
		irq_domain_free_irqs_common(domain, virq, 1);
	msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
	msi_unlock_descs(dev);
	return ret;
}

/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		break;
	default:
		return false;
	}

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the can_mask attribute is set.
	 */
	desc = msi_first_desc(dev, MSI_DESC_ALL);
	return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}

static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
			       int allocated)
{
	switch(domain->bus_token) {
	case DOMAIN_BUS_PCI_MSI:
	case DOMAIN_BUS_VMD_MSI:
		if (IS_ENABLED(CONFIG_PCI_MSI))
			break;
		fallthrough;
	default:
		return -ENOSPC;
	}

	/* Let a failed PCI multi MSI allocation retry */
	if (desc->nvec_used > 1)
		return 1;

	/* If there was a successful allocation let the caller know */
	return allocated ? allocated : -ENOSPC;
}

#define VIRQ_CAN_RESERVE	0x01
#define VIRQ_ACTIVATE		0x02
#define VIRQ_NOMASK_QUIRK	0x04

static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
	int ret;

	if (!(vflags & VIRQ_CAN_RESERVE)) {
		irqd_clr_can_reserve(irqd);
		if (vflags & VIRQ_NOMASK_QUIRK)
			irqd_set_msi_nomask_quirk(irqd);
	}

	if (!(vflags & VIRQ_ACTIVATE))
		return 0;

	ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
	if (ret)
		return ret;
	/*
	 * If the interrupt uses reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (vflags & VIRQ_CAN_RESERVE)
		irqd_clr_activated(irqd);
	return 0;
}

int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	msi_alloc_info_t arg = { };
	unsigned int vflags = 0;
	struct msi_desc *desc;
	int allocated = 0;
	int i, ret, virq;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	/*
	 * This flag is set by the PCI layer as we need to activate
	 * the MSI entries before the PCI layer enables MSI in the
	 * card. Otherwise the card latches a random msi message.
	 */
	if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
		vflags |= VIRQ_ACTIVATE;

	/*
	 * Interrupt can use a reserved vector and will not occupy
	 * a real device vector until the interrupt is requested.
	 */
	if (msi_check_reservation_mode(domain, info, dev)) {
		vflags |= VIRQ_CAN_RESERVE;
		/*
		 * MSI affinity setting requires a special quirk (X86) when
		 * reservation mode is active.
		 */
		if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
			vflags |= VIRQ_NOMASK_QUIRK;
	}

	msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0)
			return msi_handle_pci_fail(domain, desc, allocated);

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
			ret = msi_init_virq(domain, virq + i, vflags);
			if (ret)
				return ret;
		}
		if (info->flags & MSI_FLAG_DEV_SYSFS) {
			ret = msi_sysfs_populate_desc(dev, desc);
			if (ret)
				return ret;
		}
		allocated++;
	}
	return 0;
}

static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
					   struct device *dev,
					   unsigned int num_descs)
{
	if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
		return 0;

	return msi_add_simple_msi_descs(dev, 0, num_descs);
}

/**
 * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from a MSI interrupt domain
 * @domain: The domain to allocate from
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are allocated
 * @nvec: The number of interrupts to allocate
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation/free.
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
				       int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	lockdep_assert_held(&dev->msi.data->mutex);

	ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
	if (ret)
		return ret;

	ret = ops->domain_alloc_irqs(domain, dev, nvec);
	if (ret)
		msi_domain_free_irqs_descs_locked(domain, dev);
	return ret;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain: The domain to allocate from
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are allocated
 * @nvec: The number of interrupts to allocate
 *
 * Return: %0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
	int ret;

	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
	msi_unlock_descs(dev);
	return ret;
}

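/*
 * Usage sketch (illustrative only, not part of this file): allocating and
 * releasing a block of interrupts through an MSI domain. The vector count
 * is an assumption:
 *
 *	ret = msi_domain_alloc_irqs(domain, dev, 4);
 *	if (ret)
 *		return ret;
 *	...
 *	msi_domain_free_irqs(domain, dev);
 */
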
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct irq_data *irqd;
	struct msi_desc *desc;
	int i;

	/* Only handle MSI entries which have an interrupt associated */
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
		/* Make sure all interrupts are deactivated */
		for (i = 0; i < desc->nvec_used; i++) {
			irqd = irq_domain_get_irq_data(domain, desc->irq + i);
			if (irqd && irqd_is_activated(irqd))
				irq_domain_deactivate_irq(irqd);
		}

		irq_domain_free_irqs(desc->irq, desc->nvec_used);
		if (info->flags & MSI_FLAG_DEV_SYSFS)
			msi_sysfs_remove_desc(dev, desc);
		desc->irq = 0;
	}
}

static void msi_domain_free_msi_descs(struct msi_domain_info *info,
				      struct device *dev)
{
	if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
		msi_free_msi_descs(dev);
}

/**
 * msi_domain_free_irqs_descs_locked - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain: The domain managing the interrupts
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are freed
 *
 * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
 * pair. Use this for MSI irqdomains which implement their own vector
 * allocation/free.
 */
void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;

	lockdep_assert_held(&dev->msi.data->mutex);

	ops->domain_free_irqs(domain, dev);
	msi_domain_free_msi_descs(info, dev);
}

/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain: The domain managing the interrupts
 * @dev: Pointer to device struct of the device for which the interrupts
 *	 are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	msi_lock_descs(dev);
	msi_domain_free_irqs_descs_locked(domain, dev);
	msi_unlock_descs(dev);
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain: The interrupt domain to retrieve data from
 *
 * Return: the pointer to the msi_domain_info stored in @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */