...
}
- static int dev_suspend(struct pci_dev *dev, pm_message_t state)
+ static int dev_suspend(struct device *dev)
{
...
return 0;
}
- static int dev_resume(struct pci_dev *dev)
+ static int dev_resume(struct device *dev)
{
...
.id_table = dev_id_table,
.probe = dev_probe,
.remove = dev_remove,
- .suspend = dev_suspend,
- .resume = dev_resume,
+ .driver.pm = &dev_pm_ops,
.shutdown = dev_shutdown,
.sriov_configure = dev_sriov_configure,
};
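
For reference, the dev_pm_ops instance named above can be declared with the standard helper, assuming the converted callbacks keep the names from this example (a sketch, not part of the diff):

	/* Illustrative: bind the converted callbacks into one dev_pm_ops */
	static SIMPLE_DEV_PM_OPS(dev_pm_ops, dev_suspend, dev_resume);

The PCI core then drives suspend/resume through the generic device PM path rather than the legacy PCI-specific hooks, taking care of PCI state saving and power transitions itself.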
mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP.
Alternatively, platforms which set HAVE_PCI_MMAP may provide their own
-implementation of pci_mmap_page_range() instead of defining
+implementation of pci_mmap_resource_range() instead of defining
ARCH_GENERIC_PCI_MMAP_RESOURCE.
Platforms which support write-combining maps of PCI resources must define
maxItems: 5
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 8
interrupt-names:
- items:
- - const: msi
+ minItems: 1
+ maxItems: 8
# Common definitions for clocks, clock-names and reset.
# Platform constraints are described later.
- resets
- reset-names
+ # Newer chipsets support either 1 or 8 MSI vectors
+ # On older chipsets it's always 1 MSI vector
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pcie-msm8996
+ - qcom,pcie-sc7280
+ - qcom,pcie-sc8180x
+ - qcom,pcie-sdm845
+ - qcom,pcie-sm8150
+ - qcom,pcie-sm8250
+ - qcom,pcie-sm8450-pcie0
+ - qcom,pcie-sm8450-pcie1
+ then:
+ oneOf:
+ - properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names:
+ items:
+ - const: msi
+ - properties:
+ interrupts:
+ minItems: 8
+ interrupt-names:
+ items:
+ - const: msi0
+ - const: msi1
+ - const: msi2
+ - const: msi3
+ - const: msi4
+ - const: msi5
+ - const: msi6
+ - const: msi7
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names:
+ items:
+ - const: msi
+
unevaluatedProperties: false
examples:
struct zpci_dev *zdev;
int devfn, rc, ret = 0;
- if (!zbus->function[0])
- return 0;
-
for (devfn = 0; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {
zdev = zbus->function[devfn];
if (zdev && zdev->state == ZPCI_FN_STATE_CONFIGURED) {
/* zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus
* @zbus: the zbus holding the zdevices
- * @f0: function 0 of the bus
+ * @fr: PCI root function that will determine the bus's domain and bus speed
* @ops: the pci operations
*
- * Function zero is taken as a parameter as this is used to determine the
- * domain, multifunction property and maximum bus speed of the entire bus.
+ * The PCI function @fr determines the domain (its UID), multifunction property
+ * and maximum bus speed of the entire bus.
*
* Return: 0 on success, an error code otherwise
*/
-static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *f0, struct pci_ops *ops)
+static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, struct pci_ops *ops)
{
struct pci_bus *bus;
int domain;
- domain = zpci_alloc_domain((u16)f0->uid);
+ domain = zpci_alloc_domain((u16)fr->uid);
if (domain < 0)
return domain;
zbus->domain_nr = domain;
- zbus->multifunction = f0->rid_available;
- zbus->max_bus_speed = f0->max_bus_speed;
+ zbus->multifunction = fr->rid_available;
+ zbus->max_bus_speed = fr->max_bus_speed;
/*
* Note that the zbus->resources are taken over and zbus->resources
}
}
-/* zpci_bus_create_hotplug_slots - Add hotplug slot(s) for device added to bus
- * @zdev: the zPCI device that was newly added
- *
- * Add the hotplug slot(s) for the newly added PCI function. Normally this is
- * simply the slot for the function itself. If however we are adding the
- * function 0 on a zbus, it might be that we already registered functions on
- * that zbus but could not create their hotplug slots yet so add those now too.
- *
- * Return: 0 on success, an error code otherwise
- */
-static int zpci_bus_create_hotplug_slots(struct zpci_dev *zdev)
-{
- struct zpci_bus *zbus = zdev->zbus;
- int devfn, rc = 0;
-
- rc = zpci_init_slot(zdev);
- if (rc)
- return rc;
- zdev->has_hp_slot = 1;
-
- if (zdev->devfn == 0 && zbus->multifunction) {
- /* Now that function 0 is there we can finally create the
- * hotplug slots for those functions with devfn != 0 that have
- * been parked in zbus->function[] waiting for us to be able to
- * create the PCI bus.
- */
- for (devfn = 1; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {
- zdev = zbus->function[devfn];
- if (zdev && !zdev->has_hp_slot) {
- rc = zpci_init_slot(zdev);
- if (rc)
- return rc;
- zdev->has_hp_slot = 1;
- }
- }
-
- }
-
- return rc;
-}
-
static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
int rc = -EINVAL;
pr_err("devfn %04x is already assigned\n", zdev->devfn);
return rc;
}
+
zdev->zbus = zbus;
zbus->function[zdev->devfn] = zdev;
zpci_nb_devices++;
- if (zbus->bus) {
- if (zbus->multifunction && !zdev->rid_available) {
- WARN_ONCE(1, "rid_available not set for multifunction\n");
- goto error;
- }
-
- zpci_bus_create_hotplug_slots(zdev);
- } else {
- /* Hotplug slot will be created once function 0 appears */
- zbus->multifunction = 1;
+ if (zbus->multifunction && !zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set for multifunction\n");
+ goto error;
}
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ goto error;
+ zdev->has_hp_slot = 1;
return 0;
return -ENOMEM;
}
- if (zdev->devfn == 0) {
+ if (!zbus->bus) {
+ /* The UID of the first PCI function registered with a zpci_bus
+ * is used as the domain number for that bus. Currently there
+ * is exactly one zpci_bus per domain.
+ */
rc = zpci_bus_create_pci_bus(zbus, zdev, ops);
if (rc)
goto error;
#define HAVE_PCI_MMAP
#define arch_can_pci_mmap_io() 1
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#define get_pci_unmapped_area get_fb_unmapped_area
#endif /* CONFIG_SPARC64 */
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
-
-/* If the user uses a host-bridge as the PCI device, he may use
- * this to perform a raw mmap() of the I/O or MEM space behind
- * that controller.
- *
- * This can be useful for execution of x86 PCI bios initialization code
- * on a PCI card, like the xfree86 int10 stuff does.
- */
-static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned long space_size, user_offset, user_size;
-
- if (mmap_state == pci_mmap_io) {
- space_size = resource_size(&pbm->io_space);
- } else {
- space_size = resource_size(&pbm->mem_space);
- }
-
- /* Make sure the request is in range. */
- user_offset = vma->vm_pgoff << PAGE_SHIFT;
- user_size = vma->vm_end - vma->vm_start;
-
- if (user_offset >= space_size ||
- (user_offset + user_size) > space_size)
- return -EINVAL;
-
- if (mmap_state == pci_mmap_io) {
- vma->vm_pgoff = (pbm->io_space.start +
- user_offset) >> PAGE_SHIFT;
- } else {
- vma->vm_pgoff = (pbm->mem_space.start +
- user_offset) >> PAGE_SHIFT;
- }
-
- return 0;
-}
-
-/* Adjust vm_pgoff of VMA such that it is the physical page offset
- * corresponding to the 32-bit pci bus offset for DEV requested by the user.
- *
- * Basically, the user finds the base address for his device which he wishes
- * to mmap. They read the 32-bit value from the config space base register,
- * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
- * offset parameter of mmap on /proc/bus/pci/XXX for that device.
- *
- * Returns negative error code on failure, zero on success.
- */
-static int __pci_mmap_make_offset(struct pci_dev *pdev,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
-{
- unsigned long user_paddr, user_size;
- int i, err;
-
- /* First compute the physical address in vma->vm_pgoff,
- * making sure the user offset is within range in the
- * appropriate PCI space.
- */
- err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
- if (err)
- return err;
-
- /* If this is a mapping on a host bridge, any address
- * is OK.
- */
- if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
- return err;
-
- /* Otherwise make sure it's in the range for one of the
- * device's resources.
- */
- user_paddr = vma->vm_pgoff << PAGE_SHIFT;
- user_size = vma->vm_end - vma->vm_start;
+ resource_size_t ioaddr = pci_resource_start(pdev, bar);
- for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
- struct resource *rp = &pdev->resource[i];
- resource_size_t aligned_end;
-
- /* Active? */
- if (!rp->flags)
- continue;
-
- /* Same type? */
- if (i == PCI_ROM_RESOURCE) {
- if (mmap_state != pci_mmap_mem)
- continue;
- } else {
- if ((mmap_state == pci_mmap_io &&
- (rp->flags & IORESOURCE_IO) == 0) ||
- (mmap_state == pci_mmap_mem &&
- (rp->flags & IORESOURCE_MEM) == 0))
- continue;
- }
-
- /* Align the resource end to the next page address.
- * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
- * because actually we need the address of the next byte
- * after rp->end.
- */
- aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;
-
- if ((rp->start <= user_paddr) &&
- (user_paddr + user_size) <= aligned_end)
- break;
- }
-
- if (i > PCI_ROM_RESOURCE)
+ if (!pbm)
return -EINVAL;
- return 0;
-}
-
-/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
- * device mapping.
- */
-static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state)
-{
- /* Our io_remap_pfn_range takes care of this, do nothing. */
-}
-
-/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
- * for this architecture. The region in the process to map is described by vm_start
- * and vm_end members of VMA, the base physical address is found in vm_pgoff.
- * The pci device structure is provided so that architectures may make mapping
- * decisions on a per-device or per-bus basis.
- *
- * Returns a negative error code on failure, zero on success.
- */
-int pci_mmap_page_range(struct pci_dev *dev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- int ret;
-
- ret = __pci_mmap_make_offset(dev, vma, mmap_state);
- if (ret < 0)
- return ret;
-
- __pci_mmap_set_pgprot(dev, vma, mmap_state);
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- ret = io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- if (ret)
- return ret;
+ vma->vm_pgoff += (ioaddr + pbm->io_space.start) >> PAGE_SHIFT;
return 0;
}
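
For orientation, here is a condensed sketch of the generic pci_mmap_resource_range() in drivers/pci/mmap.c that takes over once ARCH_GENERIC_PCI_MMAP_RESOURCE is defined: it performs the range validation the deleted helpers above used to do, and calls the new pci_iobar_pfn() hook to rebase I/O BAR mappings (simplified; the in-tree version also handles write-combining):

int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
			    struct vm_area_struct *vma,
			    enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size;
	int ret;

	/* Reject mappings that extend beyond the BAR */
	size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1;
	if (vma->vm_pgoff + vma_pages(vma) > size)
		return -EINVAL;

	if (mmap_state == pci_mmap_io) {
		/* Arch hook: rebase vm_pgoff into the host I/O window */
		ret = pci_iobar_pfn(pdev, bar, vma);
		if (ret)
			return ret;
	} else {
		vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}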
dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
-static int dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
.xlate = pci_irqd_intx_xlate,
};
-static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned long val;
return 1;
}
-static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp)
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret, i, count, num_ctrls;
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct dra7xx_pcie *dra7xx;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
- struct pcie_port *pp;
unsigned long reg;
u32 bit;
return IRQ_HANDLED;
}
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
{
int ret;
struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
pp->irq = platform_get_irq(pdev, 1);
return pp->irq;
/* MSI IRQ is muxed */
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
return (val & PCIE_ELBI_XMLH_LINKUP);
}
-static int exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
struct platform_device *pdev)
{
struct dw_pcie *pci = &ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
}
pp->ops = &exynos_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
{
struct exynos_pcie *ep = dev_get_drvdata(dev);
struct dw_pcie *pci = &ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
return ret;
}
-static int imx6_pcie_host_init(struct pcie_port *pp)
+static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
{
int ret;
struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct pcie_port *pp = &imx6_pcie->pci->pp;
+ struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
static void imx6_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
/* Bus parent is the PCI bridge, its parent is this platform driver */
if (!bus->dev.parent || !bus->dev.parent->parent)
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
const struct dw_pcie_ep_ops *ep_ops;
- unsigned int version;
+ u32 version;
};
struct keystone_pcie {
static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
struct dw_pcie *pci;
u64 msi_target;
static void ks_pcie_msi_mask(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
static void ks_pcie_msi_unmask(struct irq_data *data)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
u32 irq = data->hwirq;
struct dw_pcie *pci;
.irq_unmask = ks_pcie_msi_unmask,
};
-static int ks_pcie_msi_host_init(struct pcie_port *pp)
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
u64 start, end;
struct resource *mem;
int i;
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
*/
static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irq;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 vector, reg, pos;
return 0;
}
-static int __init ks_pcie_host_init(struct pcie_port *pp)
+static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
- .version = 0x365A,
+ .version = DW_PCIE_VER_365A,
};
static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
.host_ops = &ks_pcie_am654_host_ops,
.mode = DW_PCIE_RC_TYPE,
- .version = 0x490A,
+ .version = DW_PCIE_VER_490A,
};
static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
.ep_ops = &ks_pcie_am654_ep_ops,
.mode = DW_PCIE_EP_TYPE,
- .version = 0x490A,
+ .version = DW_PCIE_VER_490A,
};
static const struct of_device_id ks_pcie_of_match[] = {
struct device_link **link;
struct gpio_desc *gpiod;
struct resource *res;
- unsigned int version;
void __iomem *base;
u32 num_viewport;
struct phy **phy;
u32 num_lanes;
char name[10];
+ u32 version;
int ret;
int irq;
int i;
goto err_get_sync;
}
- if (pci->version >= 0x480A)
+ if (dw_pcie_ver_is_ge(pci, 480A))
ret = ks_pcie_am654_set_mode(dev, mode);
else
ret = ks_pcie_set_mode(dev);
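
The DW_PCIE_VER_* constants and the dw_pcie_ver_is()/dw_pcie_ver_is_ge() helpers used above are defined in pcie-designware.h, roughly as below; the values are the ASCII-encoded version strings the controller reports (a sketch for orientation):

#define DW_PCIE_VER_365A	0x3336352a	/* "365*" */
#define DW_PCIE_VER_460A	0x3436302a	/* "460*" */
#define DW_PCIE_VER_480A	0x3438302a	/* "480*" */
#define DW_PCIE_VER_490A	0x3439302a	/* "490*" */

#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
	((_pci)->version _op DW_PCIE_VER_ ## _ver)

#define dw_pcie_ver_is(_pci, _ver)	__dw_pcie_ver_cmp(_pci, _ver, ==)
#define dw_pcie_ver_is_ge(_pci, _ver)	__dw_pcie_ver_cmp(_pci, _ver, >=)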
const struct ls_pcie_ep_drvdata *drvdata;
};
-static int ls_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {
- .start_link = ls_pcie_establish_link,
-};
-
static const struct pci_epc_features*
ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
.func_offset = 0x20000,
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
.func_offset = 0x8000,
.ops = &ls_pcie_ep_ops,
- .dw_pcie_ops = &dw_ls_pcie_ep_ops,
};
static const struct of_device_id ls_pcie_ep_of_match[] = {
iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
-static int ls_pcie_host_init(struct pcie_port *pp)
+static int ls_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
return 0;
}
-static int meson_pcie_host_init(struct pcie_port *pp)
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
unsigned int busnr = bus->number;
struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
static void al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
- struct pcie_port *pp = &pcie->pci->pp;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
u32 cfg_control_offset;
u8 subordinate_bus;
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
}
-static int al_pcie_host_init(struct pcie_port *pp)
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct al_pcie *pcie = to_al_pcie(pci);
return 0;
}
-static int armada8k_pcie_host_init(struct pcie_port *pp)
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
{
u32 reg;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct platform_device *pdev)
{
struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct dw_pcie_ep *ep = &pci->ep;
switch (artpec6_pcie->mode) {
usleep_range(100, 200);
}
-static int artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
return 0;
}
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_barno bar, dma_addr_t cpu_addr,
- enum dw_pcie_as_type as_type)
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ dma_addr_t cpu_addr, enum pci_barno bar)
{
int ret;
u32 free_win;
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,
- as_type);
+ ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, type,
+ cpu_addr, bar);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
phys_addr_t phys_addr,
u64 pci_addr, size_t size)
{
- u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 free_win;
+ int ret;
free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
if (free_win >= pci->num_ob_windows) {
return -EINVAL;
}
- dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
+ phys_addr, pci_addr, size);
+ if (ret)
+ return ret;
set_bit(free_win, ep->ob_window_map);
ep->outbound_addr[free_win] = phys_addr;
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
clear_bit(atu_index, ep->ib_window_map);
ep->epf_bar[bar] = NULL;
}
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
int flags = epf_bar->flags;
- enum dw_pcie_as_type as_type;
- u32 reg;
unsigned int func_offset = 0;
+ int ret, type;
+ u32 reg;
func_offset = dw_pcie_ep_func_select(ep, func_no);
reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
if (!(flags & PCI_BASE_ADDRESS_SPACE))
- as_type = DW_PCIE_AS_MEM;
+ type = PCIE_ATU_TYPE_MEM;
else
- as_type = DW_PCIE_AS_IO;
+ type = PCIE_ATU_TYPE_IO;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, bar,
- epf_bar->phys_addr, as_type);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
if (ret)
return ret;
if (ret < 0)
return;
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (pci->ops && pci->ops->stop_link)
- pci->ops->stop_link(pci);
+ dw_pcie_stop_link(pci);
}
static int dw_pcie_ep_start(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops || !pci->ops->start_link)
- return -EINVAL;
-
- return pci->ops->start_link(pci);
+ return dw_pcie_start_link(pci);
}
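
The dw_pcie_start_link()/dw_pcie_stop_link() calls above are presumably small inliners in pcie-designware.h that treat a missing ops hook as a no-op, along these lines; this is also what makes the empty ls_pcie_establish_link() stub removed elsewhere in this series unnecessary:

static inline int dw_pcie_start_link(struct dw_pcie *pci)
{
	if (pci->ops && pci->ops->start_link)
		return pci->ops->start_link(pci);

	return 0;
}

static inline void dw_pcie_stop_link(struct dw_pcie *pci)
{
	if (pci->ops && pci->ops->stop_link)
		pci->ops->stop_link(pci);
}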
static const struct pci_epc_features*
if (!pci->dbi_base2) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- if (!res)
+ if (!res) {
pci->dbi_base2 = pci->dbi_base + SZ_4K;
- else {
+ } else {
pci->dbi_base2 = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base2))
return PTR_ERR(pci->dbi_base2);
}
}
- dw_pcie_iatu_detect(pci);
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ib_windows),
- sizeof(long),
- GFP_KERNEL);
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
if (!ep->ib_window_map)
return -ENOMEM;
- ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(pci->num_ob_windows),
- sizeof(long),
- GFP_KERNEL);
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
if (!ep->ob_window_map)
return -ENOMEM;
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->window.page_size);
if (!ep->msi_mem) {
+ ret = -ENOMEM;
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
- return -ENOMEM;
+ goto err_exit_epc_mem;
}
if (ep->ops->get_features) {
return 0;
}
- return dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_complete(ep);
+ if (ret)
+ goto err_free_epc_mem;
+
+ return 0;
+
+err_free_epc_mem:
+ pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
+ epc->mem->window.page_size);
+
+err_exit_epc_mem:
+ pci_epc_mem_exit(epc);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
};
/* MSI int handler */
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
int i, pos;
unsigned long val;
static void dw_chained_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
chained_irq_enter(chip, desc);
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
static void dw_pci_bottom_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
static void dw_pci_bottom_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
static void dw_pci_bottom_ack(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned int virq, unsigned int nr_irqs,
void *args)
{
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
u32 i;
int bit;
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
.free = dw_pcie_irq_domain_free,
};
-int dw_pcie_allocate_domains(struct pcie_port *pp)
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
return 0;
}
-static void dw_pcie_free_msi(struct pcie_port *pp)
+static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
- if (pp->msi_irq)
- irq_set_chained_handler_and_data(pp->msi_irq, NULL, NULL);
+ u32 ctrl;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ NULL, NULL);
+ }
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ dma_unmap_page(dev, pp->msi_data, PAGE_SIZE, DMA_FROM_DEVICE);
+ if (pp->msi_page)
+ __free_page(pp->msi_page);
}
}
-static void dw_pcie_msi_init(struct pcie_port *pp)
+static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
-int dw_pcie_host_init(struct pcie_port *pp)
+static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 ctrl, max_vectors;
+ int irq;
+
+ /* Parse any "msiX" IRQs described in the devicetree */
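+	/*
+	 * Hypothetical fragment, for illustration only:
+	 *	interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+	 *		     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+	 *	interrupt-names = "msi0", "msi1";
+	 * Each "msiX" IRQ serves one MSI controller of up to
+	 * MAX_MSI_IRQS_PER_CTRL (32) vectors.
+	 */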
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ char msi_name[] = "msiX";
+
+ msi_name[3] = '0' + ctrl;
+ irq = platform_get_irq_byname_optional(pdev, msi_name);
+ if (irq == -ENXIO)
+ break;
+ if (irq < 0)
+ return dev_err_probe(dev, irq,
+ "Failed to parse MSI IRQ '%s'\n",
+ msi_name);
+
+ pp->msi_irq[ctrl] = irq;
+ }
+
+	/* If no "msiX" IRQs, caller should fall back to the "msi" IRQ */
+ if (ctrl == 0)
+ return -ENXIO;
+
+ max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
+ if (pp->num_vectors > max_vectors) {
+ dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
+ max_vectors);
+ pp->num_vectors = max_vectors;
+ }
+ if (!pp->num_vectors)
+ pp->num_vectors = max_vectors;
+
+ return 0;
+}
+
+static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+ u32 ctrl, num_ctrls;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
+
+ if (!pp->msi_irq[0]) {
+ ret = dw_pcie_parse_split_msi_irq(pp);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ }
+
+ if (!pp->num_vectors)
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ if (!pp->msi_irq[0]) {
+ pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq[0] < 0) {
+ pp->msi_irq[0] = platform_get_irq(pdev, 0);
+ if (pp->msi_irq[0] < 0)
+ return pp->msi_irq[0];
+ }
+ }
+
+ dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
+
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+ ret = dw_pcie_allocate_domains(pp);
+ if (ret)
+ return ret;
+
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ dw_chained_msi_isr, pp);
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+
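+	/* The mapped address below is only used as the MSI target address
+	 * programmed into endpoints (no data actually lands in the page),
+	 * so allocate it below 4G for endpoints that support only 32-bit
+	 * MSI addresses.
+	 */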
+ pp->msi_page = alloc_page(GFP_DMA32);
+ pp->msi_data = dma_map_page(dev, pp->msi_page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dev, pp->msi_data);
+ if (ret) {
+ dev_err(pci->dev, "Failed to map MSI data\n");
+ __free_page(pp->msi_page);
+ pp->msi_page = NULL;
+ pp->msi_data = 0;
+ dw_pcie_free_msi(pp);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
struct pci_host_bridge *bridge;
- struct resource *cfg_res;
+ struct resource *res;
int ret;
- raw_spin_lock_init(&pci->pp.lock);
+ raw_spin_lock_init(&pp->lock);
- cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res);
- pp->cfg0_base = cfg_res->start;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (res) {
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, cfg_res);
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pp->va_cfg0_base))
return PTR_ERR(pp->va_cfg0_base);
} else {
}
if (!pci->dbi_base) {
- struct resource *dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
}
of_property_read_bool(np, "msi-parent") ||
of_property_read_bool(np, "msi-map"));
- if (!pp->num_vectors) {
+ /*
+	 * For the has_msi_ctrl case, the default num_vectors assignment is
+	 * handled in dw_pcie_msi_host_init().
+ */
+ if (!pp->has_msi_ctrl && !pp->num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
} else if (pp->num_vectors > MAX_MSI_IRQS) {
dev_err(dev, "Invalid number of vectors\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_deinit_host;
}
if (pp->ops->msi_host_init) {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
- return ret;
+ goto err_deinit_host;
} else if (pp->has_msi_ctrl) {
- u32 ctrl, num_ctrls;
-
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
- for (ctrl = 0; ctrl < num_ctrls; ctrl++)
- pp->irq_mask[ctrl] = ~0;
-
- if (!pp->msi_irq) {
- pp->msi_irq = platform_get_irq_byname_optional(pdev, "msi");
- if (pp->msi_irq < 0) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
- }
-
- pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
-
- ret = dw_pcie_allocate_domains(pp);
- if (ret)
- return ret;
-
- if (pp->msi_irq > 0)
- irq_set_chained_handler_and_data(pp->msi_irq,
- dw_chained_msi_isr,
- pp);
-
- ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
-
- pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
- sizeof(pp->msi_msg),
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- ret = dma_mapping_error(pci->dev, pp->msi_data);
- if (ret) {
- dev_err(pci->dev, "Failed to map MSI data\n");
- pp->msi_data = 0;
- goto err_free_msi;
- }
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
}
}
+ dw_pcie_version_detect(pci);
+
dw_pcie_iatu_detect(pci);
- dw_pcie_setup_rc(pp);
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_free_msi;
- if (!dw_pcie_link_up(pci) && pci->ops && pci->ops->start_link) {
- ret = pci->ops->start_link(pci);
+ if (!dw_pcie_link_up(pci)) {
+ ret = dw_pcie_start_link(pci);
if (ret)
goto err_free_msi;
}
bridge->sysdata = pp;
ret = pci_host_probe(bridge);
- if (!ret)
- return 0;
+ if (ret)
+ goto err_stop_link;
+
+ return 0;
+
+err_stop_link:
+ dw_pcie_stop_link(pci);
err_free_msi:
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
+
+err_deinit_host:
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
-void dw_pcie_host_deinit(struct pcie_port *pp)
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
+
+ dw_pcie_stop_link(pci);
+
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
+
+ if (pp->ops->host_deinit)
+ pp->ops->host_deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- int type;
- u32 busdev;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int type, ret;
+ u32 busdev;
/*
* Checking whether the link is up here is a last line of defense
else
type = PCIE_ATU_TYPE_CFG1;
-
- dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev, pp->cfg0_size);
+ ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
+ pp->cfg0_size);
+ if (ret)
+ return NULL;
return pp->va_cfg0_base + where;
}
static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- int ret;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- if (!ret && pci->io_cfg_atu_shared)
- dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
- return ret;
+ return PCIBIOS_SUCCESSFUL;
}
static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- int ret;
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- if (!ret && pci->io_cfg_atu_shared)
- dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->cfg0_io_shared) {
+ ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
+ pp->io_base, pp->io_bus_addr,
+ pp->io_size);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
- return ret;
+ return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops dw_child_pcie_ops = {
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
if (PCI_SLOT(devfn) > 0)
.write = pci_generic_config_write,
};
-void dw_pcie_setup_rc(struct pcie_port *pp)
+static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
- int i;
- u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *entry;
+ int i, ret;
+
+	/* Note: the very first outbound ATU window is used for config accesses */
+ if (!pci->num_ob_windows) {
+ dev_err(pci->dev, "No outbound iATU found\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure all outbound windows are disabled before proceeding with
+ * the MEM/IO ranges setups.
+ */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++i)
+ break;
+
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set MEM range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++i) {
+ ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
+ pp->io_base,
+ pp->io_bus_addr,
+ pp->io_size);
+ if (ret) {
+ dev_err(pci->dev, "Failed to set IO range %pr\n",
+ entry->res);
+ return ret;
+ }
+ } else {
+ pp->cfg0_io_shared = true;
+ }
+ }
+
+ if (pci->num_ob_windows <= i)
+ dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)\n",
+ pci->num_ob_windows);
+
+ return 0;
+}
+
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 val, ctrl, num_ctrls;
+ int ret;
/*
* Enable DBI read-only registers for writing/updating configuration.
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
- /* Ensure all outbound windows are disabled so there are multiple matches */
- for (i = 0; i < pci->num_ob_windows; i++)
- dw_pcie_disable_atu(pci, i, DW_PCIE_REGION_OUTBOUND);
-
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
* ATU, so we should not program the ATU here.
*/
if (pp->bridge->child_ops == &dw_child_pcie_ops) {
- int atu_idx = 0;
- struct resource_entry *entry;
-
- /* Get last memory resource entry */
- resource_list_for_each_entry(entry, &pp->bridge->windows) {
- if (resource_type(entry->res) != IORESOURCE_MEM)
- continue;
-
- if (pci->num_ob_windows <= ++atu_idx)
- break;
-
- dw_pcie_prog_outbound_atu(pci, atu_idx,
- PCIE_ATU_TYPE_MEM, entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
- }
-
- if (pp->io_size) {
- if (pci->num_ob_windows > ++atu_idx)
- dw_pcie_prog_outbound_atu(pci, atu_idx,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
- else
- pci->io_cfg_atu_shared = true;
- }
-
- if (pci->num_ob_windows <= atu_idx)
- dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
- pci->num_ob_windows);
+ ret = dw_pcie_iatu_setup(pp);
+ if (ret)
+ return ret;
}
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/types.h>
-#include <linux/regmap.h>
#include "pcie-designware.h"
struct dw_plat_pcie {
struct dw_pcie *pci;
- struct regmap *regmap;
enum dw_pcie_device_mode mode;
};
enum dw_pcie_device_mode mode;
};
-static const struct of_device_id dw_plat_pcie_of_match[];
-
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
};
-static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = dw_plat_pcie_establish_link,
-};
-
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct platform_device *pdev)
{
struct dw_pcie *pci = dw_plat_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
dw_plat_pcie->mode = mode;
return -ENODEV;
ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
- if (ret < 0)
- return ret;
break;
case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
return -ENODEV;
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ ret = -EINVAL;
+ break;
}
- return 0;
+ return ret;
}
static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/sizes.h>
#include <linux/types.h>
#include "../../pci.h"
#include "pcie-designware.h"
+void dw_pcie_version_detect(struct dw_pcie *pci)
+{
+ u32 ver;
+
+ /* The content of the CSR is zero on DWC PCIe older than v4.70a */
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
+ if (!ver)
+ return;
+
+ if (pci->version && pci->version != ver)
+ dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
+ pci->version, ver);
+ else
+ pci->version = ver;
+
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
+
+ if (pci->type && pci->type != ver)
+ dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
+ pci->type, ver);
+ else
+ pci->type = ver;
+}
+
/*
* These interfaces resemble the pci_find_*capability() interfaces, but these
* are for configuring host controllers, which are bridges *to* PCI devices but
dev_err(pci->dev, "write DBI address failed\n");
}
-static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
+ u32 index)
{
+ if (pci->iatu_unroll_enabled)
+ return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
+ return pci->atu_base;
+}
+
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
+{
+ void __iomem *base;
int ret;
u32 val;
+ base = dw_pcie_select_atu(pci, dir, index);
+
if (pci->ops && pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
+ return pci->ops->read_dbi(pci, base, reg, 4);
- ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
+ ret = dw_pcie_read(base + reg, 4, &val);
if (ret)
dev_err(pci->dev, "Read ATU address failed\n");
return val;
}
-static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
+ u32 reg, u32 val)
{
+ void __iomem *base;
int ret;
+ base = dw_pcie_select_atu(pci, dir, index);
+
if (pci->ops && pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
+ pci->ops->write_dbi(pci, base, reg, 4, val);
return;
}
- ret = dw_pcie_write(pci->atu_base + reg, 4, val);
+ ret = dw_pcie_write(base + reg, 4, val);
if (ret)
dev_err(pci->dev, "Write ATU address failed\n");
}
-static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}
-static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}
static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
- int index, int type,
- u64 cpu_addr, u64 pci_addr,
- u64 size)
+static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
{
u32 retries, val;
- u64 limit_addr = cpu_addr + size - 1;
-
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
- lower_32_bits(limit_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
- upper_32_bits(limit_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
- val = upper_32_bits(size - 1) ?
- val | PCIE_ATU_INCREASE_REGION_SIZE : val;
- if (pci->version == 0x490A)
- val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, val);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_ENABLE);
+ u64 limit_addr;
- /*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
- */
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ob_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return;
+ if (pci->ops && pci->ops->cpu_addr_fixup)
+ cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
- mdelay(LINK_WAIT_IATU);
+ limit_addr = cpu_addr + size - 1;
+
+ if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ return -EINVAL;
}
- dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
-{
- u32 retries, val;
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(cpu_addr));
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
- if (pci->iatu_unroll_enabled) {
- dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
- return;
- }
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
- PCIE_ATU_REGION_OUTBOUND | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- if (pci->version >= 0x460A)
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_LIMIT,
- upper_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
val = type | PCIE_ATU_FUNC_NUM(func_no);
- val = ((upper_32_bits(size - 1)) && (pci->version >= 0x460A)) ?
- val | PCIE_ATU_INCREASE_REGION_SIZE : val;
- if (pci->version == 0x490A)
+ if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, val);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+
+ dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
- return;
+ return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
-}
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
+ return -ETIMEDOUT;
}
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size)
{
- __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
+ return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
+ cpu_addr, pci_addr, size);
}
-static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr,
+ u64 size)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
+ cpu_addr, pci_addr, size);
}
-static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
- int index, int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- int type;
- u32 retries, val;
-
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
- return -EINVAL;
- }
-
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
- PCIE_ATU_FUNC_NUM(func_no));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_FUNC_NUM_MATCH_EN |
- PCIE_ATU_ENABLE |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
-
- /*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
- */
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ib_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return 0;
-
- mdelay(LINK_WAIT_IATU);
- }
- dev_err(pci->dev, "Inbound iATU is not being enabled\n");
-
- return -EBUSY;
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+ int type, u64 cpu_addr, u8 bar)
{
- int type;
u32 retries, val;
- if (pci->iatu_unroll_enabled)
- return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
- cpu_addr, as_type);
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
- index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
+ if (!IS_ALIGNED(cpu_addr, pci->region_align))
return -EINVAL;
- }
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
- PCIE_ATU_FUNC_NUM(func_no));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
- PCIE_ATU_FUNC_NUM_MATCH_EN |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(cpu_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
+ PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
- return -EBUSY;
+ return -ETIMEDOUT;
}
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type)
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
- int region;
-
- switch (type) {
- case DW_PCIE_REGION_INBOUND:
- region = PCIE_ATU_REGION_INBOUND;
- break;
- case DW_PCIE_REGION_OUTBOUND:
- region = PCIE_ATU_REGION_OUTBOUND;
- break;
- default:
- return;
- }
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
+ u32 offset, val;
int retries;
/* Check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link up\n");
- return 0;
- }
+ if (dw_pcie_link_up(pci))
+ break;
+
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_info(pci->dev, "Phy link never came up\n");
+ if (retries >= LINK_WAIT_MAX_RETRIES) {
+ dev_err(pci->dev, "Phy link never came up\n");
+ return -ETIMEDOUT;
+ }
- return -ETIMEDOUT;
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
+
+ return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
if (pci->ops && pci->ops->link_up)
return pci->ops->link_up(pci);
- val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
}
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+static bool dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
if (val == 0xffffffff)
- return 1;
+ return true;
- return 0;
+ return false;
}
-static void dw_pcie_iatu_detect_regions_unroll(struct dw_pcie *pci)
+static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
{
- int max_region, i, ob = 0, ib = 0;
- u32 val;
+ int max_region, ob, ib;
+ u32 val, min, dir;
+ u64 max;
- max_region = min((int)pci->atu_size / 512, 256);
-
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
- 0x11110000);
+ if (pci->iatu_unroll_enabled) {
+ max_region = min((int)pci->atu_size / 512, 256);
+ } else {
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+ }
- val = dw_pcie_readl_ob_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
- if (val == 0x11110000)
- ob++;
- else
+ for (ob = 0; ob < max_region; ob++) {
+ dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
break;
}
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET,
- 0x11110000);
-
- val = dw_pcie_readl_ib_unroll(pci, i, PCIE_ATU_UNR_LOWER_TARGET);
- if (val == 0x11110000)
- ib++;
- else
+ for (ib = 0; ib < max_region; ib++) {
+ dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
break;
}
- pci->num_ib_windows = ib;
- pci->num_ob_windows = ob;
-}
-
-static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
-{
- int max_region, i, ob = 0, ib = 0;
- u32 val;
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
- max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | i);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
- if (val == 0x11110000)
- ob++;
- else
- break;
+ if (ob) {
+ dir = PCIE_ATU_REGION_DIR_OB;
+ } else if (ib) {
+ dir = PCIE_ATU_REGION_DIR_IB;
+ } else {
+ dev_err(pci->dev, "No iATU regions found\n");
+ return;
}
- for (i = 0; i < max_region; i++) {
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | i);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, 0x11110000);
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_LOWER_TARGET);
- if (val == 0x11110000)
- ib++;
- else
- break;
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
+ min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);
+
+ if (dw_pcie_ver_is_ge(pci, 460A)) {
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
+ max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
+ } else {
+ max = 0;
}
- pci->num_ib_windows = ib;
pci->num_ob_windows = ob;
+ pci->num_ib_windows = ib;
+ pci->region_align = 1 << fls(min);
+ pci->region_limit = (max << 32) | (SZ_4G - 1);
}
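
The detection relies on hardwired bits in the first window's limit registers: writing zero to PCIE_ATU_LIMIT leaves the read-only low-order bits set (revealing the minimal region granularity), and writing all-ones to PCIE_ATU_UPPER_LIMIT reveals how many upper address bits are implemented. A worked example, assuming a core with 64 KB granularity and 40 implemented address bits:

    /*
     * min = read of PCIE_ATU_LIMIT after writing 0        -> 0x0000FFFF
     * region_align = 1 << fls(0x0000FFFF) = 1 << 16       -> 64 KB
     *
     * max = read of PCIE_ATU_UPPER_LIMIT after 0xFFFFFFFF -> 0x000000FF
     * region_limit = (0xFFULL << 32) | (SZ_4G - 1)        -> 2^40 - 1
     */
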
void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(pci->dev);
- if (pci->version >= 0x480A || (!pci->version &&
- dw_pcie_iatu_unroll_enabled(pci))) {
- pci->iatu_unroll_enabled = true;
+ pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
+ if (pci->iatu_unroll_enabled) {
if (!pci->atu_base) {
struct resource *res =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
if (res) {
pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(dev, res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
}
if (!pci->atu_base || IS_ERR(pci->atu_base))
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
if (!pci->atu_size)
/* Pick a minimal default, enough for 8 in and 8 out windows */
pci->atu_size = SZ_4K;
+ } else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+ }
- dw_pcie_iatu_detect_regions_unroll(pci);
- } else
- dw_pcie_iatu_detect_regions(pci);
+ dw_pcie_iatu_detect_regions(pci);
dev_info(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
"enabled" : "disabled");
- dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
- pci->num_ob_windows, pci->num_ib_windows);
+ dev_info(pci->dev, "iATU regions: %u ob, %u ib, align %uK, limit %lluG\n",
+ pci->num_ob_windows, pci->num_ib_windows,
+ pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
void dw_pcie_setup(struct dw_pcie *pci)
{
+ struct device_node *np = pci->dev->of_node;
u32 val;
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
if (pci->link_gen > 0)
dw_pcie_link_set_max_speed(pci, pci->link_gen);
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+ if (of_property_read_bool(np, "snps,enable-cdm-check")) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+ PCIE_PL_CHK_REG_CHK_REG_START;
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ }
+
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
if (!pci->num_lanes) {
dev_dbg(pci->dev, "Using h/w default number of lanes\n");
break;
}
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-
- if (of_property_read_bool(np, "snps,enable-cdm-check")) {
- val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
- val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
- PCIE_PL_CHK_REG_CHK_REG_START;
- dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
- }
}
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+/* DWC PCIe IP-core versions (native support since v4.70a) */
+#define DW_PCIE_VER_365A 0x3336352a
+#define DW_PCIE_VER_460A 0x3436302a
+#define DW_PCIE_VER_470A 0x3437302a
+#define DW_PCIE_VER_480A 0x3438302a
+#define DW_PCIE_VER_490A 0x3439302a
+#define DW_PCIE_VER_520A 0x3532302a
+
+#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
+ ((_pci)->version _op DW_PCIE_VER_ ## _ver)
+
+#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
+
+#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
+
+#define __dw_pcie_ver_type_cmp(_pci, _type, _op) \
+ ((_pci)->type _op DW_PCIE_VER_TYPE_ ## _type)
+
+#define dw_pcie_ver_type_is(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_type_cmp(_pci, _type, ==))
+
+#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, >=) && \
+ __dw_pcie_ver_type_cmp(_pci, _type, ==))
+
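
A usage sketch for the new helpers; pci->version is expected to be filled in by dw_pcie_version_detect() from the PCIE_VERSION_NUMBER register defined below:

    static void example_version_checks(struct dw_pcie *pci)
    {
    	/* The 64-bit PCIE_ATU_UPPER_LIMIT register only exists since v4.60a */
    	if (dw_pcie_ver_is_ge(pci, 460A))
    		dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, 0,
    				   PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);

    	/* Exact-version check, e.g. for a quirk specific to v5.20a */
    	if (dw_pcie_ver_is(pci, 520A))
    		; /* apply a hypothetical v5.20a-only workaround here */
    }
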
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+#define PCIE_VERSION_NUMBER 0x8F8
+#define PCIE_VERSION_TYPE 0x8FC
+
+/*
+ * iATU inbound and outbound window CSRs. Before IP-core v4.80a the
+ * iATU region CSRs were indirectly accessible by means of a dedicated
+ * viewport selector. The iATU/eDMA CSR space was redesigned in DWC PCIe
+ * v4.80a so that the viewport was unrolled into a directly accessible
+ * iATU/eDMA CSR space.
+ */
#define PCIE_ATU_VIEWPORT 0x900
-#define PCIE_ATU_REGION_INBOUND BIT(31)
-#define PCIE_ATU_REGION_OUTBOUND 0
-#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_REGION_DIR_IB BIT(31)
+#define PCIE_ATU_REGION_DIR_OB 0
+#define PCIE_ATU_VIEWPORT_BASE 0x904
+#define PCIE_ATU_UNROLL_BASE(dir, index) \
+ (((index) << 9) | (((dir) == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
+#define PCIE_ATU_VIEWPORT_SIZE 0x2C
+#define PCIE_ATU_REGION_CTRL1 0x000
#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
#define PCIE_ATU_TYPE_MEM 0x0
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG1 0x5
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
-#define PCIE_ATU_CR2 0x908
+#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
-#define PCIE_ATU_LOWER_BASE 0x90C
-#define PCIE_ATU_UPPER_BASE 0x910
-#define PCIE_ATU_LIMIT 0x914
-#define PCIE_ATU_LOWER_TARGET 0x918
+#define PCIE_ATU_LOWER_BASE 0x008
+#define PCIE_ATU_UPPER_BASE 0x00C
+#define PCIE_ATU_LIMIT 0x010
+#define PCIE_ATU_LOWER_TARGET 0x014
#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
-#define PCIE_ATU_UPPER_TARGET 0x91C
-#define PCIE_ATU_UPPER_LIMIT 0x924
+#define PCIE_ATU_UPPER_TARGET 0x018
+#define PCIE_ATU_UPPER_LIMIT 0x020
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
#define PCIE_DBI_RO_WR_EN BIT(0)
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
-/*
- * iATU Unroll-specific register definitions
- * From 4.80 core version the address translation will be made by unroll
- */
-#define PCIE_ATU_UNR_REGION_CTRL1 0x00
-#define PCIE_ATU_UNR_REGION_CTRL2 0x04
-#define PCIE_ATU_UNR_LOWER_BASE 0x08
-#define PCIE_ATU_UNR_UPPER_BASE 0x0C
-#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
-#define PCIE_ATU_UNR_LOWER_TARGET 0x14
-#define PCIE_ATU_UNR_UPPER_TARGET 0x18
-#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
-
/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
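
Since the register map is now unified, per-window CSR addresses can be computed the same way in both modes. For example, relative to atu_base:

    /*
     * Unrolled mode (atu_base maps the dedicated "atu" resource):
     *   outbound window 2, REGION_CTRL2: (2 << 9) | 0x004          = 0x404
     *   inbound window 1,  LOWER_TARGET: (1 << 9) | BIT(8) | 0x014 = 0x314
     *
     * Viewport mode reuses the same relative offsets with atu_base set to
     * dbi_base + PCIE_ATU_VIEWPORT_BASE and the window first selected by
     * writing the direction and index to PCIE_ATU_VIEWPORT.
     */
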
-/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
- ((region) << 9)
-
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- (((region) << 9) | BIT(8))
-
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
-struct pcie_port;
struct dw_pcie;
+struct dw_pcie_rp;
struct dw_pcie_ep;
-enum dw_pcie_region_type {
- DW_PCIE_REGION_UNKNOWN,
- DW_PCIE_REGION_INBOUND,
- DW_PCIE_REGION_OUTBOUND,
-};
-
enum dw_pcie_device_mode {
DW_PCIE_UNKNOWN_TYPE,
DW_PCIE_EP_TYPE,
};
struct dw_pcie_host_ops {
- int (*host_init)(struct pcie_port *pp);
- int (*msi_host_init)(struct pcie_port *pp);
+ int (*host_init)(struct dw_pcie_rp *pp);
+ void (*host_deinit)(struct dw_pcie_rp *pp);
+ int (*msi_host_init)(struct dw_pcie_rp *pp);
};
-struct pcie_port {
+struct dw_pcie_rp {
bool has_msi_ctrl:1;
+ bool cfg0_io_shared:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
u32 io_size;
int irq;
const struct dw_pcie_host_ops *ops;
- int msi_irq;
+ int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
- u16 msi_msg;
dma_addr_t msi_data;
+ struct page *msi_page;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
u32 irq_mask[MAX_MSI_CTRLS];
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
-enum dw_pcie_as_type {
- DW_PCIE_AS_UNKNOWN,
- DW_PCIE_AS_MEM,
- DW_PCIE_AS_IO,
-};
-
struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
struct device *dev;
void __iomem *dbi_base;
void __iomem *dbi_base2;
- /* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
size_t atu_size;
u32 num_ib_windows;
u32 num_ob_windows;
- struct pcie_port pp;
+ u32 region_align;
+ u64 region_limit;
+ struct dw_pcie_rp pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
- unsigned int version;
+ u32 version;
+ u32 type;
int num_lanes;
int link_gen;
u8 n_fts[2];
bool iatu_unroll_enabled: 1;
- bool io_cfg_atu_shared: 1;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
#define to_dw_pcie_from_ep(endpoint) \
container_of((endpoint), struct dw_pcie, ep)
+void dw_pcie_version_detect(struct dw_pcie *pci);
+
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size);
-void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 cpu_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type);
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type);
+ int type, u64 cpu_addr, u8 bar);
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
dw_pcie_writel_dbi(pci, reg, val);
}
+static inline int dw_pcie_start_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->start_link)
+ return pci->ops->start_link(pci);
+
+ return 0;
+}
+
+static inline void dw_pcie_stop_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
+}
+
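
For drivers wiring up these hooks, a minimal dw_pcie_ops sketch is shown below; the application-logic register (APP_LTSSM_EN) and the drvdata layout are platform-specific assumptions, not part of the common API:

    static int example_start_link(struct dw_pcie *pci)
    {
    	struct example_pcie *ep = dev_get_drvdata(pci->dev); /* hypothetical */

    	/* Assumed vendor register that releases the LTSSM */
    	writel(readl(ep->app_base + APP_LTSSM_EN) | BIT(0),
    	       ep->app_base + APP_LTSSM_EN);

    	return 0;
    }

    static const struct dw_pcie_ops example_pcie_ops = {
    	.start_link = example_start_link,
    };
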
#ifdef CONFIG_PCIE_DW_HOST
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_setup_rc(struct pcie_port *pp);
-int dw_pcie_host_init(struct pcie_port *pp);
-void dw_pcie_host_deinit(struct pcie_port *pp);
-int dw_pcie_allocate_domains(struct pcie_port *pp);
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
-static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
}
-static inline void dw_pcie_setup_rc(struct pcie_port *pp)
+static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
+ return 0;
}
-static inline int dw_pcie_host_init(struct pcie_port *pp)
+static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
return 0;
}
-static inline void dw_pcie_host_deinit(struct pcie_port *pp)
+static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
}
-static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
return 0;
}
return 0;
}
-static int rockchip_pcie_host_init(struct pcie_port *pp)
+static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
int ret;
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
return ret;
}
-static int fu740_pcie_host_init(struct pcie_port *pp)
+static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fu740_pcie *afp = to_fu740_pcie(pci);
writel(val, histb_pcie->ctrl + reg);
}
-static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
}
-static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
return 0;
}
-static int histb_pcie_host_init(struct pcie_port *pp)
+static int histb_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
regulator_disable(hipcie->vpcie);
}
-static int histb_pcie_host_enable(struct pcie_port *pp)
+static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
{
struct histb_pcie *hipcie;
struct dw_pcie *pci;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
enum of_gpio_flags of_flags;
#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
-struct intel_pcie_soc {
- unsigned int pcie_ver;
-};
-
struct intel_pcie {
struct dw_pcie pci;
void __iomem *app_base;
intel_pcie_ltssm_disable(pcie);
intel_pcie_link_setup(pcie);
intel_pcie_init_n_fts(pci);
- dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_setup_rc(&pci->pp);
+ if (ret)
+ goto app_init_err;
+
dw_pcie_upconfig_setup(pci);
intel_pcie_device_rst_deassert(pcie);
static int intel_pcie_remove(struct platform_device *pdev)
{
struct intel_pcie *pcie = platform_get_drvdata(pdev);
- struct pcie_port *pp = &pcie->pci.pp;
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
dw_pcie_host_deinit(pp);
__intel_pcie_remove(pcie);
return intel_pcie_host_setup(pcie);
}
-static int intel_pcie_rc_init(struct pcie_port *pp)
+static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
.host_init = intel_pcie_rc_init,
};
-static const struct intel_pcie_soc pcie_data = {
- .pcie_ver = 0x520A,
-};
-
static int intel_pcie_probe(struct platform_device *pdev)
{
- const struct intel_pcie_soc *data;
struct device *dev = &pdev->dev;
struct intel_pcie *pcie;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
int ret;
if (ret)
return ret;
- data = device_get_match_data(dev);
- if (!data)
- return -ENODEV;
-
pci->ops = &intel_pcie_ops;
- pci->version = data->pcie_ver;
pp->ops = &intel_pcie_dw_ops;
ret = dw_pcie_host_init(pp);
};
static const struct of_device_id of_intel_pcie_match[] = {
- { .compatible = "intel,lgm-pcie", .data = &pcie_data },
+ { .compatible = "intel,lgm-pcie" },
{}
};
struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 val, mask, status;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
/*
* Keem Bay PCIe Controller provides an additional IP logic on top of
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
u32 val;
int ret;
pp->ops = &keembay_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = keembay_pcie_setup_msi_irq(pcie);
if (ret)
return 0;
}
-static int kirin_pcie_host_init(struct pcie_port *pp)
+static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
{
pp->bridge->ops = &kirin_pci_ops;
return 0;
}
-static int qcom_pcie_host_init(struct pcie_port *pp)
+static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
struct qcom_pcie *pcie;
const struct qcom_pcie_cfg *pcie_cfg;
struct spear13xx_pcie *spear13xx_pcie = arg;
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
unsigned int status;
status = readl(&app_reg->int_sts);
return 0;
}
-static int spear13xx_pcie_host_init(struct pcie_port *pp)
+static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct platform_device *pdev)
{
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
}
pp->ops = &spear13xx_pcie_host_ops;
- pp->msi_irq = -ENODEV;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
u32 val, u32 reg)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+ u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
+ PCIE_ATU_VIEWPORT_BASE;
writel(val, pcie_ecam->iatu_base + offset + reg);
}
PCIE_ATU_LIMIT);
atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
PCIE_ATU_UPPER_TARGET);
- atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
- atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
}
static void __iomem *tegra194_map_bus(struct pci_bus *bus,
enum dw_pcie_device_mode mode;
};
-static void apply_bad_link_workaround(struct pcie_port *pp)
+static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra194_pcie *pcie = to_tegra_pcie(pci);
{
struct tegra194_pcie *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
u32 val, tmp;
u16 val_w;
static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
#endif
-static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra194_pcie *pcie = to_tegra_pcie(pci);
val_w);
}
-static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra194_pcie *pcie = to_tegra_pcie(pci);
appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}
-static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra194_pcie *pcie = to_tegra_pcie(pci);
appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}
-static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
+static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra194_pcie *pcie = to_tegra_pcie(pci);
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static int tegra194_pcie_host_init(struct pcie_port *pp)
+static int tegra194_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra194_pcie *pcie = to_tegra_pcie(pci);
{
u32 val, offset, speed, tmp;
struct tegra194_pcie *pcie = to_tegra_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
bool retry = true;
if (pcie->mode == DW_PCIE_EP_TYPE) {
static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
{
- struct pcie_port *pp = &pcie->pci.pp;
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
struct pci_bus *child, *root_bus = NULL;
struct pci_dev *pdev;
static int tegra_pcie_init_controller(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
int ret;
ret = tegra_pcie_config_controller(pcie, false);
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
struct tegra194_pcie *pcie;
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
struct dw_pcie *pci;
struct phy **phys;
char *name;
pci->ops = &tegra_dw_pcie_ops;
pci->n_fts[0] = N_FTS_VAL;
pci->n_fts[1] = FTS_VAL;
- pci->version = 0x490A;
pp = &pci->pp;
pp->num_vectors = MAX_MSI_IRQS;
disable_irq(pcie->pci.pp.irq);
if (IS_ENABLED(CONFIG_PCI_MSI))
- disable_irq(pcie->pci.pp.msi_irq);
+ disable_irq(pcie->pci.pp.msi_irq[0]);
tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
- struct pcie_port *pp = irq_desc_get_handler_data(desc);
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_exit(chip, desc);
}
-static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
return ret;
}
-static int uniphier_pcie_host_init(struct pcie_port *pp)
+static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
*/
static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
return cpu_addr & ~pp->io_base;
}
.stop_link = visconti_pcie_stop_link,
};
-static int visconti_pcie_host_init(struct pcie_port *pp)
+static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
pp->irq = platform_get_irq_byname(pdev, "intr");
if (pp->irq < 0)
* Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
#define PCIE_CORE_PCIEXP_CAP 0xc0
+#define PCIE_CORE_PCIERR_CAP 0x100
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
switch (reg) {
- case PCI_EXP_SLTCTL:
- *value = PCI_EXP_SLTSTA_PDS << 16;
- return PCI_BRIDGE_EMUL_HANDLED;
-
/*
- * PCI_EXP_RTCTL and PCI_EXP_RTSTA are also supported, but do not need
- * to be handled here, because their values are stored in emulated
- * config space buffer, and we read them from there when needed.
+ * PCI_EXP_SLTCAP, PCI_EXP_SLTCTL, PCI_EXP_RTCTL and PCI_EXP_RTSTA are
+ * also supported, but do not need to be handled here, because their
+ * values are stored in emulated config space buffer, and we read them
+ * from there when needed.
*/
case PCI_EXP_LNKCAP: {
}
}
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_ext_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case 0:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+
+ /*
+ * The PCI_EXT_CAP_NEXT bits point to offset 0x150, but the Armada
+ * 3700 Functional Specification does not document any registers
+ * at that address.
+ *
+ * Thus we clear the PCI_EXT_CAP_NEXT bits to make the Advanced
+ * Error Reporting Capability header the last Extended Capability.
+ * If documentation for those registers becomes available in the
+ * future, this can be changed.
+ */
+ *value &= 0x000fffff;
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_STATUS:
+ case PCI_ERR_ROOT_ERR_SRC:
+ *value = advk_readl(pcie, PCIE_CORE_PCIERR_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
+static void
+advk_pci_bridge_emul_ext_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ /* These are W1C registers, so clear other bits */
+ case PCI_ERR_UNCOR_STATUS:
+ case PCI_ERR_COR_STATUS:
+ case PCI_ERR_ROOT_STATUS:
+ new &= mask;
+ fallthrough;
+
+ case PCI_ERR_UNCOR_MASK:
+ case PCI_ERR_UNCOR_SEVER:
+ case PCI_ERR_COR_MASK:
+ case PCI_ERR_CAP:
+ case PCI_ERR_HEADER_LOG + 0:
+ case PCI_ERR_HEADER_LOG + 4:
+ case PCI_ERR_HEADER_LOG + 8:
+ case PCI_ERR_HEADER_LOG + 12:
+ case PCI_ERR_ROOT_COMMAND:
+ case PCI_ERR_ROOT_ERR_SRC:
+ advk_writel(pcie, new, PCIE_CORE_PCIERR_CAP + reg);
+ break;
+
+ default:
+ break;
+ }
+}
+
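The "new &= mask" step above matters because pci-bridge-emul merges the bytes being written over a read-back of the register before calling this handler; forwarding that merged value to a write-1-to-clear register would acknowledge status bits the guest never touched. An illustration with assumed values:

    /*
     * Guest writes 0x0001 to the low 16 bits of PCI_ERR_COR_STATUS:
     *   old  = 0x00020001   (bits 0 and 17 latched in hardware)
     *   new  = 0x00020001   (written bytes merged over the read-back)
     *   mask = 0x0000ffff   (only the low 16 bits were written)
     *
     * Forwarding "new" unmasked would also write 1 to bit 17 and clear
     * it; "new &= mask" confines the clear to bit 0, which the guest
     * actually acknowledged.
     */
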
static const struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
.read_base = advk_pci_bridge_emul_base_conf_read,
.write_base = advk_pci_bridge_emul_base_conf_write,
.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+ .read_ext = advk_pci_bridge_emul_ext_conf_read,
+ .write_ext = advk_pci_bridge_emul_ext_conf_write,
};
/*
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCI_INTERRUPT_INTA;
- /* Aardvark HW provides PCIe Capability structure in version 2 */
- bridge->pcie_conf.cap = cpu_to_le16(2);
+ /*
+ * Aardvark HW provides the PCIe Capability structure in version 2 and
+ * indicates slot support, which is emulated.
+ */
+ bridge->pcie_conf.cap = cpu_to_le16(2 | PCI_EXP_FLAGS_SLOT);
+
+ /*
+ * Set the Presence Detect State bit permanently, since there is no
+ * support for unplugging the card or for detecting whether it is
+ * plugged in. (If a platform that supports it appears in the future,
+ * via a GPIO for example, it should be implemented via this bit.)
+ *
+ * Set the physical slot number to 1, since there is only one port
+ * and the value zero is reserved for ports integrated into the same
+ * silicon as the Root Port, which is not the case here.
+ */
+ bridge->pcie_conf.slotcap = cpu_to_le32(FIELD_PREP(PCI_EXP_SLTCAP_PSN,
+ 1));
+ bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates supports for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
/* Forward declarations */
struct brcm_pcie;
-static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
-static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
enum {
RGR1_SW_INIT_1,
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
-static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
-};
-
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
-};
-
-static const struct pcie_cfg_data generic_cfg = {
- .offsets = pcie_offsets,
- .type = GENERIC,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm7435_cfg = {
- .offsets = pcie_offsets,
- .type = BCM7435,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const struct pcie_cfg_data bcm4908_cfg = {
- .offsets = pcie_offsets,
- .type = BCM4908,
- .perst_set = brcm_pcie_perst_set_4908,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
-};
-
-static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
- .perst_set = brcm_pcie_perst_set_7278,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
-};
-
-static const struct pcie_cfg_data bcm2711_cfg = {
- .offsets = pcie_offsets,
- .type = BCM2711,
- .perst_set = brcm_pcie_perst_set_generic,
- .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+struct subdev_regulators {
+ unsigned int num_supplies;
+ struct regulator_bulk_data supplies[];
};
struct brcm_msi {
u32 hw_rev;
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ struct subdev_regulators *sr;
+ bool ep_wakeup_capable;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
return dla && plu;
}
-static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct brcm_pcie *pcie = bus->sysdata;
void __iomem *base = pcie->base;
int idx;
- /* Accesses to the RC go right to the RC registers if slot==0 */
+ /* Accesses to the RC go right to the RC registers if !devfn */
if (pci_is_root_bus(bus))
- return PCI_SLOT(devfn) ? NULL : base + where;
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + where;
+ return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
}
-static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn,
- int where)
+static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
struct brcm_pcie *pcie = bus->sysdata;
void __iomem *base = pcie->base;
int idx;
- /* Accesses to the RC go right to the RC registers if slot==0 */
+ /* Accesses to the RC go right to the RC registers if !devfn */
if (pci_is_root_bus(bus))
- return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3);
+ return devfn ? NULL : base + PCIE_ECAM_REG(where);
+
+ /* An access to our HW w/o link-up will cause a CPU Abort */
+ if (!brcm_pcie_link_up(pcie))
+ return NULL;
/* For devices, write to the config space index register */
- idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3));
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
writel(idx, base + IDX_ADDR(pcie));
return base + DATA_ADDR(pcie);
}
-static struct pci_ops brcm_pcie_ops = {
- .map_bus = brcm_pcie_map_conf,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
-};
-
-static struct pci_ops brcm_pcie_ops32 = {
- .map_bus = brcm_pcie_map_conf32,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
-};
-
static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u64 rc_bar2_offset, rc_bar2_size;
void __iomem *base = pcie->base;
- struct device *dev = pcie->dev;
+ struct pci_host_bridge *bridge;
struct resource_entry *entry;
- bool ssc_good = false;
- struct resource *res;
- int num_out_wins = 0;
- u16 nlw, cls, lnksta;
- int i, ret, memc;
u32 tmp, burst, aspm_support;
+ int num_out_wins = 0;
+ int ret, memc;
/* Reset the bridge */
pcie->bridge_sw_init_set(pcie, 1);
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
+
/* disable the PCIe->GISB memory window (RC_BAR1) */
tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
-
- /* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
/*
- * Give the RC/EP time to wake up, before trying to configure RC.
- * Intermittently check status for link-up, up to a total of 100ms.
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
*/
- for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
- msleep(5);
-
- if (!brcm_pcie_link_up(pcie)) {
- dev_err(dev, "link down\n");
- return -ENODEV;
- }
-
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(dev, "PCIe misconfigured; is in EP mode\n");
- return -EINVAL;
- }
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ bridge = pci_host_bridge_from_priv(pcie);
resource_list_for_each_entry(entry, &bridge->windows) {
- res = entry->res;
+ struct resource *res = entry->res;
if (resource_type(res) != IORESOURCE_MEM)
continue;
num_out_wins++;
}
- /* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
- tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* PCIe->SCB endian mode for BAR */
+ tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
+ writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+
+ return 0;
+}
+
+static int brcm_pcie_start_link(struct brcm_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ void __iomem *base = pcie->base;
+ u16 nlw, cls, lnksta;
+ bool ssc_good = false;
+ u32 tmp;
+ int ret, i;
+
+ /* Unassert the fundamental reset */
+ pcie->perst_set(pcie, 0);
/*
- * For config space accesses on the RC, show the right class for
- * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ * Give the RC/EP time to wake up, before trying to configure RC.
+ * Intermittently check status for link-up, up to a total of 100ms.
*/
- tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
- u32p_replace_bits(&tmp, 0x060400,
- PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
+ msleep(5);
+
+ if (!brcm_pcie_link_up(pcie)) {
+ dev_err(dev, "link down\n");
+ return -ENODEV;
+ }
+
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
pci_speed_string(pcie_link_speed[cls]), nlw,
ssc_good ? "(SSC)" : "(!SSC)");
- /* PCIe->SCB endian mode for BAR */
- tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
- PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
- writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
-
/*
* Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
* is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
return 0;
}
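
With link bring-up split out of brcm_pcie_setup(), the runtime ordering is worth spelling out; the names below refer to the functions in this patch:

    /*
     * probe:
     *   brcm_pcie_setup()            - reset bridge, BARs, ASPM, class code
     *   pci_host_probe()
     *     -> brcm_pcie_add_bus()     - fires when the root port's secondary
     *                                  bus is added
     *          -> regulators enabled (if described in DT)
     *          -> brcm_pcie_start_link() - deassert PERST#, poll for link
     *
     * resume_noirq:
     *   regulators re-enabled (unless left on for a wakeup-capable device)
     *     -> brcm_pcie_start_link()
     */
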
+static const char * const supplies[] = {
+ "vpcie3v3",
+ "vpcie3v3aux",
+ "vpcie12v",
+};
+
+static void *alloc_subdev_regulators(struct device *dev)
+{
+ const size_t size = sizeof(struct subdev_regulators) +
+ sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
+ struct subdev_regulators *sr;
+ int i;
+
+ sr = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (sr) {
+ sr->num_supplies = ARRAY_SIZE(supplies);
+ for (i = 0; i < ARRAY_SIZE(supplies); i++)
+ sr->supplies[i].supply = supplies[i];
+ }
+
+ return sr;
+}
+
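The open-coded size computation is the standard flexible-array pattern; an equivalent, overflow-safe spelling would use struct_size() from <linux/overflow.h> (shown as a suggestion, not what the patch does):

    sr = devm_kzalloc(dev, struct_size(sr, supplies, ARRAY_SIZE(supplies)),
    		      GFP_KERNEL);
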
+static int brcm_pcie_add_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr;
+ int ret;
+
+ if (!bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ if (dev->of_node) {
+ sr = alloc_subdev_regulators(dev);
+ if (!sr) {
+ dev_info(dev, "Can't allocate regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ pcie->sr = sr;
+
+ ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_info(dev, "No regulators for downstream device\n");
+ goto no_regulators;
+ }
+
+ ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_err(dev, "Can't enable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+ }
+ }
+
+no_regulators:
+ brcm_pcie_start_link(pcie);
+ return 0;
+}
+
+static void brcm_pcie_remove_bus(struct pci_bus *bus)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ struct subdev_regulators *sr = pcie->sr;
+ struct device *dev = &bus->dev;
+
+ if (!sr)
+ return;
+
+ if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
+ dev_err(dev, "Failed to disable regulators for downstream device\n");
+ regulator_bulk_free(sr->num_supplies, sr->supplies);
+ pcie->sr = NULL;
+}
+
/* L23 is a low-power PCIe link state */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
pcie->bridge_sw_init_set(pcie, 1);
}
-static int brcm_pcie_suspend(struct device *dev)
+static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
+{
+ bool *ret = data;
+
+ if (device_may_wakeup(&dev->dev)) {
+ *ret = true;
+ dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
+ }
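+ /* A non-zero return stops pci_walk_bus() at the first wakeup-capable device */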
+ return (int) *ret;
+}
+
+static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
brcm_pcie_turn_off(pcie);
return ret;
}
+ if (pcie->sr) {
+ /*
+ * Now turn off the regulators, but if at least one
+ * downstream device is enabled as a wake-up source, do not
+ * turn off regulators.
+ */
+ pcie->ep_wakeup_capable = false;
+ pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
+ &pcie->ep_wakeup_capable);
+ if (!pcie->ep_wakeup_capable) {
+ ret = regulator_bulk_disable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn off regulators\n");
+ reset_control_reset(pcie->rescal);
+ return ret;
+ }
+ }
+ }
clk_disable_unprepare(pcie->clk);
return 0;
}
-static int brcm_pcie_resume(struct device *dev)
+static int brcm_pcie_resume_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
if (ret)
goto err_reset;
+ if (pcie->sr) {
+ if (pcie->ep_wakeup_capable) {
+ /*
+ * We are resuming from suspend. The power supplies were
+ * left enabled during suspend, so there is no need to
+ * enable them again (which would wrongly increase their
+ * usage count).
+ */
+ pcie->ep_wakeup_capable = false;
+ } else {
+ ret = regulator_bulk_enable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn on regulators\n");
+ goto err_reset;
+ }
+ }
+ }
+
+ ret = brcm_pcie_start_link(pcie);
+ if (ret)
+ goto err_regulator;
+
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
return 0;
+err_regulator:
+ if (pcie->sr)
+ regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
reset_control_rearm(pcie->rescal);
err_disable_clk:
return 0;
}
+static const int pcie_offsets[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const int pcie_offsets_bmips_7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .type = GENERIC,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bmips_7425,
+ .type = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM7435,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm4908_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM4908,
+ .perst_set = brcm_pcie_perst_set_4908,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const int pcie_offset_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data bcm7278_cfg = {
+ .offsets = pcie_offset_bcm7278,
+ .type = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+};
+
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM2711,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{},
};
+static struct pci_ops brcm_pcie_ops = {
+ .map_bus = brcm_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
+static struct pci_ops brcm7425_pcie_ops = {
+ .map_bus = brcm7425_pcie_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = brcm_pcie_remove_bus,
+};
+
static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops;
+ bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
- return pci_host_probe(bridge);
+ ret = pci_host_probe(bridge);
+ if (!ret && !brcm_pcie_link_up(pcie))
+ ret = -ENODEV;
+
+ if (ret) {
+ brcm_pcie_remove(pdev);
+ return ret;
+ }
+
+ return 0;
+
fail:
__brcm_pcie_remove(pcie);
return ret;
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
static const struct dev_pm_ops brcm_pcie_pm_ops = {
- .suspend = brcm_pcie_suspend,
- .resume = brcm_pcie_resume,
+ .suspend_noirq = brcm_pcie_suspend_noirq,
+ .resume_noirq = brcm_pcie_resume_noirq,
};
static struct platform_driver brcm_pcie_driver = {
cancel_delayed_work(&epf_test->cmd_handler);
pci_epf_test_clean_dma_chan(epf_test);
- pci_epc_stop(epc);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
-/*
- * Modern setup: generic pci_mmap_resource_range(), and implement the legacy
- * pci_mmap_page_range() (if needed) as a wrapper round it.
- */
-
-#ifdef HAVE_PCI_MMAP
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t start, end;
-
- pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
-
- /* Adjust vm_pgoff to be the offset within the resource */
- vma->vm_pgoff -= start >> PAGE_SHIFT;
- return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
- write_combine);
-}
-#endif
-
static const struct vm_operations_struct pci_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys,
vma->vm_page_prot);
}
-#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
-
-/*
- * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
- * the architecture's pci_mmap_page_range(), converting to "user visible"
- * addresses as necessary.
- */
-
-int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine)
-{
- resource_size_t start, end;
-
- /*
- * pci_mmap_page_range() expects the same kind of entry as coming
- * from /proc/bus/pci/ which is a "user visible" value. If this is
- * different from the resource itself, arch will do necessary fixup.
- */
- pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
- vma->vm_pgoff += start >> PAGE_SHIFT;
- return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
-}
#endif
pci_restore_bars(dev);
}
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
pci_power_name(dev->current_state),
pci_power_name(state));
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
pci_aer_clear_status(dev);
+
+ if (pci_aer_available())
+ pci_enable_pcie_error_reporting(dev);
+
+ pcie_set_ecrc_checking(dev);
}
void pci_aer_exit(struct pci_dev *dev)
u64 *stats = pdev->aer_stats->stats_array; \
size_t len = 0; \
\
- for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
pci_disable_pcie_error_reporting(dev);
}
- if (enable)
- pcie_set_ecrc_checking(dev);
-
return 0;
}
struct device *device = &dev->device;
struct pci_dev *port = dev->port;
+ BUILD_BUG_ON(ARRAY_SIZE(aer_correctable_error_string) <
+ AER_MAX_TYPEOF_COR_ERRS);
+ BUILD_BUG_ON(ARRAY_SIZE(aer_uncorrectable_error_string) <
+ AER_MAX_TYPEOF_UNCOR_ERRS);
+
/* Limit to Root Ports or Root Complex Event Collectors */
if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) &&
(pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT))
up_read(&pci_bus_sem);
}
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
-{
- struct pcie_link_state *link = pdev->link_state;
-
- if (aspm_disabled || !link)
- return;
- /*
- * Devices changed PM state, we should recheck if latency
- * meets all functions' requirement
- */
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- pcie_update_aspm_capable(link->root);
- pcie_config_aspm_path(link);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
{
return aspm_support_enabled;
}
-EXPORT_SYMBOL(pcie_aspm_support_enabled);
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pci_dev_set_io_state(dev, state) ||
- !pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->error_detected) {
+ if (pci_dev_is_disconnected(dev)) {
+ vote = PCI_ERS_RESULT_DISCONNECT;
+ } else if (!pci_dev_set_io_state(dev, state)) {
+ pci_info(dev, "can't recover (state transition %u -> %u invalid)\n",
+ dev->error_state, state);
+ vote = PCI_ERS_RESULT_NONE;
+ } else if (!pdrv || !pdrv->err_handler ||
+ !pdrv->err_handler->error_detected) {
/*
* If any device in the subtree does not have an error_detected
* callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
#ifdef CONFIG_PCIEAER
if (dev->aer_cap && pci_aer_available() &&
- (pcie_ports_native || host->native_aer)) {
+ (pcie_ports_native || host->native_aer))
services |= PCIE_PORT_SERVICE_AER;
-
- /*
- * Disable AER on this port in case it's been enabled by the
- * BIOS (the AER service driver will enable it when necessary).
- */
- pci_disable_pcie_error_reporting(dev);
- }
#endif
/* Root Ports and Root Complex Event Collectors may generate PMEs */
dev->broken_intx_masking = pci_intx_mask_broken(dev);
+ /* Clear errors left from system firmware */
+ pci_write_config_word(dev, PCI_STATUS, 0xffff);
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
}
EXPORT_SYMBOL(pci_scan_single_device);
-static unsigned int next_fn(struct pci_bus *bus, struct pci_dev *dev,
- unsigned int fn)
+static int next_ari_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
{
int pos;
u16 cap = 0;
unsigned int next_fn;
- if (pci_ari_enabled(bus)) {
- if (!dev)
- return 0;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
- if (!pos)
- return 0;
+ if (!dev)
+ return -ENODEV;
- pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
- next_fn = PCI_ARI_CAP_NFN(cap);
- if (next_fn <= fn)
- return 0; /* protect against malformed list */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return -ENODEV;
- return next_fn;
- }
+ pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
+ next_fn = PCI_ARI_CAP_NFN(cap);
+ if (next_fn <= fn)
+ return -ENODEV; /* protect against malformed list */
- /* dev may be NULL for non-contiguous multifunction devices */
- if (!dev || dev->multifunction)
- return (fn + 1) % 8;
+ return next_fn;
+}
- return 0;
+static int next_fn(struct pci_bus *bus, struct pci_dev *dev, int fn)
+{
+ if (pci_ari_enabled(bus))
+ return next_ari_fn(bus, dev, fn);
+
+ if (fn >= 7)
+ return -ENODEV;
+ /* only multifunction devices may have more functions */
+ if (dev && !dev->multifunction)
+ return -ENODEV;
+
+ return fn + 1;
}
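
A worked trace of the new contract (a negative value ends the scan, otherwise it is the next function number to probe), assuming a conventional two-function, non-ARI device in slot 0:

    /*
     * fn 0: 00.0 found (multifunction)  -> next_fn() == 1
     * fn 1: 00.1 found, marked MF       -> next_fn() == 2
     * fn 2..6: nothing found, but the walk continues since dev is NULL
     * fn 7: nothing found               -> next_fn() == -ENODEV, loop ends
     *
     * With ARI enabled, the walk instead follows the Next Function Number
     * chain read from each function's ARI capability.
     */
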
static int only_one_child(struct pci_bus *bus)
*/
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
- unsigned int fn, nr = 0;
struct pci_dev *dev;
+ int fn = 0, nr = 0;
if (only_one_child(bus) && (devfn > 0))
return 0; /* Already scanned the entire slot */
- dev = pci_scan_single_device(bus, devfn);
- if (!dev)
- return 0;
- if (!pci_dev_is_added(dev))
- nr++;
-
- for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
+ do {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
if (!pci_dev_is_added(dev))
nr++;
- dev->multifunction = 1;
+ if (fn > 0)
+ dev->multifunction = 1;
+ } else if (fn == 0) {
+ /*
+ * Function 0 is required unless we are running on
+ * a hypervisor that passes through individual PCI
+ * functions.
+ */
+ if (!hypervisor_isolated_pci_functions())
+ break;
}
- }
+ fn = next_fn(bus, dev, fn);
+ } while (fn >= 0);
/* Only one slot has PCIe device */
if (bus->self && nr)
{
unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
unsigned int start = bus->busn_res.start;
- unsigned int devfn, fn, cmax, max = start;
+ unsigned int devfn, cmax, max = start;
struct pci_dev *dev;
- int nr_devs;
dev_dbg(&bus->dev, "scanning bus\n");
/* Go find them, Rover! */
- for (devfn = 0; devfn < 256; devfn += 8) {
- nr_devs = pci_scan_slot(bus, devfn);
-
- /*
- * The Jailhouse hypervisor may pass individual functions of a
- * multi-function device to a guest without passing function 0.
- * Look for them as well.
- */
- if (jailhouse_paravirt() && nr_devs == 0) {
- for (fn = 1; fn < 8; fn++) {
- dev = pci_scan_single_device(bus, devfn + fn);
- if (dev)
- dev->multifunction = 1;
- }
- }
- }
+ for (devfn = 0; devfn < 256; devfn += 8)
+ pci_scan_slot(bus, devfn);
/* Reserve buses for SR-IOV capability */
used_buses = pci_iov_bus_range(bus);
{
struct pci_dev *dev = pde_data(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
+ resource_size_t start, end;
int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
if (!capable(CAP_SYS_RAWIO) ||
iomem_is_exclusive(dev->resource[i].start))
return -EINVAL;
- ret = pci_mmap_page_range(dev, i, vma,
+ pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
+
+ /* Adjust vm_pgoff to be the offset within the resource */
+ vma->vm_pgoff -= start >> PAGE_SHIFT;
+ ret = pci_mmap_resource_range(dev, i, vma,
fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;
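
The vm_pgoff rebase bridges two coordinate systems: offsets passed through /proc/bus/pci are in "user visible" resource space (as reported by pci_resource_to_user()), while pci_mmap_resource_range() expects an offset within the BAR. A numeric illustration, assuming 4 KB pages and a BAR with user-visible start 0xe0000000:

    /*
     * mmap(..., offset = 0xe0001000)  -> vma->vm_pgoff = 0xe0001
     * pci_resource_to_user()          -> start >> PAGE_SHIFT = 0xe0000
     * vma->vm_pgoff -= 0xe0000        -> vm_pgoff = 1, i.e. the second
     *                                    page within the BAR
     */
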
{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
/* Broadcom multi-function device */
{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
#endif /* !CONFIG_X86 */
+static inline bool hypervisor_isolated_pci_functions(void)
+{
+ if (IS_ENABLED(CONFIG_S390))
+ return true;
+
+ return jailhouse_paravirt();
+}
+
#endif /* __LINUX_HYPEVISOR_H */
#include <asm/pci.h>
-/* These two functions provide almost identical functionality. Depending
- * on the architecture, one will be implemented as a wrapper around the
- * other (in drivers/pci/mmap.c).
- *
+/*
* pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
* is expected to be an offset within that region.
- *
- * pci_mmap_page_range() is the legacy architecture-specific interface,
- * which accepts a "user visible" resource address converted by
- * pci_resource_to_user(), as used in the legacy mmap() interface in
- * /proc/bus/pci/.
*/
int pci_mmap_resource_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
#ifndef arch_can_pci_mmap_wc
#define arch_can_pci_mmap_wc() 0