return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
}
+/*
+ * msi_handle_pci_fail - Compute the return value after a vector
+ *			 allocation failure in __msi_domain_alloc_irqs()
+ * @domain:	The irq domain on which allocation was attempted
+ * @desc:	The MSI descriptor whose vectors failed to allocate
+ * @allocated:	Number of descriptors successfully allocated before the
+ *		failure occurred
+ *
+ * Only PCI/MSI and VMD/MSI domains, and only when CONFIG_PCI_MSI is
+ * enabled, get the PCI-specific partial-success handling below; every
+ * other case is a plain -ENOSPC failure.
+ *
+ * Returns: 1 to let a PCI multi-MSI allocation retry with fewer vectors,
+ * the number of already allocated interrupts if any succeeded, or
+ * -ENOSPC otherwise.
+ */
+static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
+ int allocated)
+{
+ switch(domain->bus_token) {
+ case DOMAIN_BUS_PCI_MSI:
+ case DOMAIN_BUS_VMD_MSI:
+ /* PCI handling applies only when PCI/MSI support is built in */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ break;
+ fallthrough;
+ default:
+ return -ENOSPC;
+ }
+
+ /* Let a failed PCI multi MSI allocation retry */
+ if (desc->nvec_used > 1)
+ return 1;
+
+ /* If there was a successful allocation let the caller know */
+ return allocated ? allocated : -ENOSPC;
+}
+
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
int nvec)
{
struct irq_data *irq_data;
struct msi_desc *desc;
msi_alloc_info_t arg = { };
+ int allocated = 0;
int i, ret, virq;
bool can_reserve;
dev_to_node(dev), &arg, false,
desc->affinity);
if (virq < 0) {
- ret = -ENOSPC;
- if (ops->handle_error)
- ret = ops->handle_error(domain, desc, ret);
- return ret;
+ ret = msi_handle_pci_fail(domain, desc, allocated);
+ goto cleanup;
}
for (i = 0; i < desc->nvec_used; i++) {
irq_set_msi_desc_off(virq, i, desc);
irq_debugfs_copy_devname(virq + i, dev);
}
+ allocated++;
}
can_reserve = msi_check_reservation_mode(domain, info, dev);