// SPDX-License-Identifier: GPL-2.0-only
/*
 * PCIe host controller driver for Xilinx XDMA PCIe Bridge
 *
 * Copyright (C) 2023 Xilinx, Inc. All rights reserved.
 */
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>

#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_PCIE_DMA_REG_IDR			0x00000138
#define XILINX_PCIE_DMA_REG_IMR			0x0000013c
#define XILINX_PCIE_DMA_REG_PSCR		0x00000144
#define XILINX_PCIE_DMA_REG_RPSC		0x00000148
#define XILINX_PCIE_DMA_REG_MSIBASE1		0x0000014c
#define XILINX_PCIE_DMA_REG_MSIBASE2		0x00000150
#define XILINX_PCIE_DMA_REG_RPEFR		0x00000154
#define XILINX_PCIE_DMA_REG_IDRN		0x00000160
#define XILINX_PCIE_DMA_REG_IDRN_MASK		0x00000164
#define XILINX_PCIE_DMA_REG_MSI_LOW		0x00000170
#define XILINX_PCIE_DMA_REG_MSI_HI		0x00000174
#define XILINX_PCIE_DMA_REG_MSI_LOW_MASK	0x00000178
#define XILINX_PCIE_DMA_REG_MSI_HI_MASK		0x0000017c

#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_PCIE_INTR_IMR_ALL_MASK	\
	(				\
		IMR(LINK_DOWN)	|	\
		IMR(HOT_RESET)	|	\
		IMR(CFG_TIMEOUT) |	\
		IMR(CORRECTABLE) |	\
		IMR(NONFATAL)	|	\
		IMR(FATAL)	|	\
		IMR(INTX)	|	\
		IMR(MSI)	|	\
		IMR(SLV_UNSUPP)	|	\
		IMR(SLV_UNEXP)	|	\
		IMR(SLV_COMPL)	|	\
		IMR(SLV_ERRP)	|	\
		IMR(SLV_CMPABT)	|	\
		IMR(SLV_ILLBUR)	|	\
		IMR(MST_DECERR)	|	\
		IMR(MST_SLVERR)		\
	)

#define XILINX_PCIE_DMA_IMR_ALL_MASK	0x0ff30fe9
#define XILINX_PCIE_DMA_IDR_ALL_MASK	0xffffffff
#define XILINX_PCIE_DMA_IDRN_MASK	GENMASK(19, 16)

/* Root Port Error Register definitions */
#define XILINX_PCIE_DMA_RPEFR_ERR_VALID	BIT(18)
#define XILINX_PCIE_DMA_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_PCIE_DMA_RPEFR_ALL_MASK	0xffffffff

/* Root Port Interrupt Register definitions */
#define XILINX_PCIE_DMA_IDRN_SHIFT	16

/* Root Port Status/control Register definitions */
#define XILINX_PCIE_DMA_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP	BIT(11)

/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS	64
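
/*
 * MSI decode mode: the bridge latches incoming MSI writes into two 32-bit
 * status registers, MSI_LOW for vectors 0-31 and MSI_HI for vectors 32-63,
 * hence 64 vectors in total.
 */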

struct xilinx_msi {
	struct irq_domain	*msi_domain;
	unsigned long		*bitmap;
	struct irq_domain	*dev_domain;
	struct mutex		lock;		/* Protect bitmap variable */
	int			irq_msi0;
	int			irq_msi1;
};

/**
 * struct pl_dma_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: IO Mapped Register Base
 * @irq: Interrupt number
 * @cfg: Holds mappings of config space window
 * @phys_reg_base: Physical address of reg base
 * @intx_domain: Legacy IRQ domain pointer
 * @pldma_domain: PL DMA IRQ domain pointer
 * @resources: Bus Resources
 * @msi: MSI information
 * @intx_irq: INTx error interrupt number
 * @lock: Lock protecting shared register access
 */
struct pl_dma_pcie {
	struct device		*dev;
	void __iomem		*reg_base;
	int			irq;
	struct pci_config_window *cfg;
	phys_addr_t		phys_reg_base;
	struct irq_domain	*intx_domain;
	struct irq_domain	*pldma_domain;
	struct list_head	resources;
	struct xilinx_msi	msi;
	int			intx_irq;
	raw_spinlock_t		lock;
};

static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
	return readl(port->reg_base + reg);
}

static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
	writel(val, port->reg_base + reg);
}

static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
{
	return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
		XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? true : false;
}

static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_PCIE_DMA_REG_RPEFR);

	if (val & XILINX_PCIE_DMA_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_PCIE_DMA_RPEFR_REQ_ID);
		pcie_write(port, XILINX_PCIE_DMA_RPEFR_ALL_MASK,
			   XILINX_PCIE_DMA_REG_RPEFR);
	}
}

static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
					    unsigned int devfn)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!pci_is_root_bus(bus)) {
		/*
		 * Checking whether the link is up is the last line of
		 * defense, and this check is inherently racy by definition.
		 * Sending a PIO request to a downstream device when the link is
		 * down causes an unrecoverable error, and a reset of the entire
		 * PCIe controller will be needed. We can reduce the likelihood
		 * of that unrecoverable error by checking whether the link is
		 * up, but we can't completely prevent it because the link may
		 * go down between the link-up check and the PIO request.
		 */
		if (!xilinx_pl_dma_pcie_link_up(port))
			return false;
	} else if (devfn > 0) {
		/* Only one device down on each root port */
		return false;
	}

	return true;
}
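
/*
 * Config space is memory-mapped at reg_base; standard ECAM offsets
 * (bus/devfn/register) select the target function.
 */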

static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct pl_dma_pcie *port = bus->sysdata;

	if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
		return NULL;

	return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}

/* PCIe operations */
static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
	.pci_ops = {
		.map_bus = xilinx_pl_dma_pcie_map_bus,
		.read	= pci_generic_config_read,
		.write	= pci_generic_config_write,
	}
};
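
/*
 * The MSI doorbell is the physical base of the bridge register space:
 * xilinx_compose_msi_msg() points endpoints at the same address, and the
 * bridge decodes writes there into the MSI_LOW/MSI_HI status registers
 * (MSI decode mode).
 */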

static void xilinx_pl_dma_pcie_enable_msi(struct pl_dma_pcie *port)
{
	phys_addr_t msi_addr = port->phys_reg_base;

	pcie_write(port, upper_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE1);
	pcie_write(port, lower_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE2);
}

static void xilinx_mask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_unmask_intx_irq(struct irq_data *data)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask, val;

	mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_PCIE_DMA_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_leg_irq_chip = {
	.name		= "pl_dma:INTx",
	.irq_mask	= xilinx_mask_intx_irq,
	.irq_unmask	= xilinx_unmask_intx_irq,
};

static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
				       unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_pl_dma_pcie_intx_map,
};
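
/*
 * The msi0/msi1 handlers each scan one 32-bit MSI status register, ack
 * each vector by writing its bit back, and dispatch the Linux IRQ mapped
 * in msi->dev_domain. The "high" handler serves hwirqs 32-63.
 */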

static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
{
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;
	struct pl_dma_pcie *port = args;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
			/* The high register reports hwirqs 32-63 */
			virq = irq_find_mapping(msi->dev_domain, bit + 32);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	struct xilinx_msi *msi;
	unsigned long status;
	u32 bit, virq;

	msi = &port->msi;

	while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
{
	struct pl_dma_pcie *port = args;
	unsigned long val;
	int i;

	val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
	val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->pldma_domain, i);

	pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);

	return IRQ_HANDLED;
}
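
/* Map each hardware event number to a symbol (used as the IRQ name) and a message. */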

#define _IC(x, s)	\
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char	*sym;
	const char	*str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
};

static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
{
	struct pl_dma_pcie *port = (struct pl_dma_pcie *)dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->pldma_domain, irq);
	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		xilinx_pl_dma_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static struct irq_chip xilinx_msi_irq_chip = {
	.name		= "pl_dma:PCIe MSI",
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info xilinx_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &xilinx_msi_irq_chip,
};

static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_reg_base;

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}
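
/*
 * Affinity is not configurable: every MSI vector arrives through the shared
 * msi0/msi1 interrupts, so there is no per-vector CPU steering.
 */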

static int xilinx_msi_set_affinity(struct irq_data *irq_data,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip xilinx_irq_chip = {
	.name			= "pl_dma:MSI",
	.irq_compose_msi_msg	= xilinx_compose_msi_msg,
	.irq_set_affinity	= xilinx_msi_set_affinity,
};
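
/*
 * Vector allocation: carve a power-of-two-aligned region out of the bitmap
 * so multi-MSI (MSI_FLAG_MULTI_PCI_MSI) gets the contiguous, aligned block
 * it requires.
 */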

static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct pl_dma_pcie *pcie = domain->host_data;
	struct xilinx_msi *msi = &pcie->msi;
	int bit, i;

	mutex_lock(&msi->lock);
	bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
				      get_count_order(nr_irqs));
	if (bit < 0) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);

	return 0;
}

static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct xilinx_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_release_region(msi->bitmap, data->hwirq,
			      get_count_order(nr_irqs));
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc	= xilinx_irq_domain_alloc,
	.free	= xilinx_irq_domain_free,
};

static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
{
	struct xilinx_msi *msi = &port->msi;

	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (msi->dev_domain) {
		irq_domain_remove(msi->dev_domain);
		msi->dev_domain = NULL;
	}

	if (msi->msi_domain) {
		irq_domain_remove(msi->msi_domain);
		msi->msi_domain = NULL;
	}
}
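
/*
 * Two-level MSI setup: dev_domain hands out hwirqs from the bitmap, and the
 * PCI/MSI domain created on top of it ties them into the generic MSI core.
 */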

static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct xilinx_msi *msi = &port->msi;
	int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
	struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);

	msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
						&dev_msi_domain_ops, port);
	if (!msi->dev_domain)
		goto out;

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &xilinx_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain)
		goto out;

	mutex_init(&msi->lock);
	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		goto out;

	raw_spin_lock_init(&port->lock);
	xilinx_pl_dma_pcie_enable_msi(port);

	return 0;

out:
	xilinx_pl_dma_pcie_free_irq_domains(port);
	dev_err(dev, "Failed to allocate MSI IRQ domains\n");

	return -ENOMEM;
}

/*
 * INTx error interrupts are Xilinx controller-specific interrupts, used to
 * notify the user about errors such as config timeouts, slave unsupported
 * requests, and fatal and non-fatal errors.
 */
static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
{
	unsigned long val;
	int i;
	struct pl_dma_pcie *port = args;

	val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
			pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);

	return IRQ_HANDLED;
}
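
/* Event interrupts are masked and unmasked via their per-event IMR bits. */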

static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
{
	struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
	.name		= "pl_dma:RC-Event",
	.irq_mask	= xilinx_pl_dma_pcie_mask_event_irq,
	.irq_unmask	= xilinx_pl_dma_pcie_unmask_event_irq,
};

static int xilinx_pl_dma_pcie_event_map(struct irq_domain *domain,
					unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_pl_dma_pcie_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_pl_dma_pcie_event_map,
};

/**
 * xilinx_pl_dma_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure.
 */
static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	int ret;

	pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32,
						   &event_domain_ops, port);
	if (!port->pldma_domain)
		return -ENOMEM;

	irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &intx_domain_ops, port);
	if (!port->intx_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	ret = xilinx_pl_dma_pcie_init_msi_irq_domain(port);
	if (ret != 0) {
		irq_domain_remove(port->intx_domain);
		return -ENOMEM;
	}

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
}
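
/*
 * Request the event, per-error and INTx interrupts. All of them are mapped
 * through pldma_domain; the INTx event fans out further to intx_domain in
 * xilinx_pl_dma_pcie_intx_flow().
 */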

static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq, err;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->pldma_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq,
				       xilinx_pl_dma_pcie_intr_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->pldma_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq);
		return err;
	}

	err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
			       IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
	if (err) {
		dev_err(dev, "Failed to request event IRQ %d\n", port->irq);
		return err;
	}

	return 0;
}

static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
{
	if (xilinx_pl_dma_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_IDR) &
		   XILINX_PCIE_DMA_IMR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_IDR);

	/* Needed for MSI DECODE MODE */
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
	pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
		   XILINX_PCIE_DMA_REG_MSI_HI_MASK);

	/* Set the Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
		   XILINX_PCIE_DMA_REG_RPSC_BEN,
		   XILINX_PCIE_DMA_REG_RPSC);
}

static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (port->msi.irq_msi0 <= 0)
		return port->msi.irq_msi0;

	ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (port->msi.irq_msi1 <= 0)
		return port->msi.irq_msi1;

	ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
			       IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
			       port);
	if (ret) {
		dev_err(dev, "Failed to register interrupt\n");
		return ret;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
				       struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;
	int err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Missing \"reg\" property\n");
		return -ENXIO;
	}
	port->phys_reg_base = res->start;

	port->cfg = pci_ecam_create(dev, res, bus_range, &xilinx_pl_dma_pcie_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	port->reg_base = port->cfg->win;

	err = xilinx_request_msi_irq(port);
	if (err) {
		pci_ecam_free(port->cfg);
		return err;
	}

	return 0;
}

static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pl_dma_pcie *port;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);
	port->dev = dev;

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus)
		return -ENODEV;

	err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	xilinx_pl_dma_pcie_init_port(port);

	err = xilinx_pl_dma_pcie_init_irq_domain(port);
	if (err)
		goto err_irq_domain;

	err = xilinx_pl_dma_pcie_setup_irq(port);
	if (err)
		goto err_host_bridge;

	bridge->sysdata = port;
	bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	xilinx_pl_dma_pcie_free_irq_domains(port);

err_irq_domain:
	pci_ecam_free(port->cfg);
	return err;
}

static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
	{
		.compatible = "xlnx,xdma-host-3.00",
	},
	{}
};

static struct platform_driver xilinx_pl_dma_pcie_driver = {
	.driver = {
		.name = "xilinx-xdma-pcie",
		.of_match_table = xilinx_pl_dma_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_pl_dma_pcie_probe,
};

builtin_platform_driver(xilinx_pl_dma_pcie_driver);