/*
 * Low-Level PCI Express Support for the SH7786
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 */
10 #include <linux/pci.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
14 #include <linux/delay.h>
15 #include <linux/slab.h>
16 #include "pcie-sh7786.h"
17 #include <asm/sizes.h>
/*
 * Per-port bookkeeping for one SH7786 PCIe port.
 * NOTE(review): members beyond 'hose' (e.g. the 'index' and 'endpoint'
 * fields used later in this file) are not visible in this view -- confirm
 * against the full definition.
 */
struct sh7786_pcie_port {
	struct pci_channel *hose;	/* generic SH PCI channel bound to this port */
/* Per-port state array, allocated in sh7786_pcie_init(). */
static struct sh7786_pcie_port *sh7786_pcie_ports;
/* Number of ports reported by ->core_init() (2 or 3 on SH7786). */
static unsigned int nr_ports;
/*
 * Silicon-specific hardware operations (a 65nm variant is provided at
 * the bottom of this file).
 */
static struct sh7786_pcie_hwops {
	int (*core_init)(void);		/* probe; returns the number of ports */
	int (*port_init_hw)(struct sh7786_pcie_port *port);	/* per-port bring-up */
/*
 * Address windows for PCIe port 0.  The IORESOURCE_MEM_32BIT windows are
 * only usable outside of legacy 29-bit mode (see the skip in pcie_init()).
 */
static struct resource sh7786_pci0_resources[] = {
		/* 8MB I/O window */
		.end	= 0xfd000000 + SZ_8M - 1,
		.flags	= IORESOURCE_IO,
		/* 512MB memory window, 32-bit mode only */
		.name	= "PCIe0 MEM 0",
		.end	= 0xc0000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
		/* 64MB memory window */
		.name	= "PCIe0 MEM 1",
		.end	= 0x10000000 + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
		/* 1MB memory window */
		.name	= "PCIe0 MEM 2",
		.end	= 0xfe100000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
/*
 * Address windows for PCIe port 1; same layout as port 0 but with its
 * own base addresses, and both secondary memory windows are 32-bit only.
 */
static struct resource sh7786_pci1_resources[] = {
		/* 8MB I/O window */
		.end	= 0xfd800000 + SZ_8M - 1,
		.flags	= IORESOURCE_IO,
		/* 512MB memory window, 32-bit mode only */
		.name	= "PCIe1 MEM 0",
		.end	= 0xa0000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
		/* 256MB memory window, 32-bit mode only */
		.name	= "PCIe1 MEM 1",
		.end	= 0x30000000 + SZ_256M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
		/* 1MB memory window */
		.name	= "PCIe1 MEM 2",
		.end	= 0xfe300000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
/*
 * Address windows for PCIe port 2.  Note the smaller (4MB) I/O window
 * relative to ports 0 and 1.
 */
static struct resource sh7786_pci2_resources[] = {
		/* 4MB I/O window */
		.end	= 0xfc800000 + SZ_4M - 1,
		.flags	= IORESOURCE_IO,
		/* 512MB memory window, 32-bit mode only */
		.name	= "PCIe2 MEM 0",
		.end	= 0x80000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
		/* 256MB memory window, 32-bit mode only */
		.name	= "PCIe2 MEM 1",
		.end	= 0x20000000 + SZ_256M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
		/* 1MB memory window */
		.name	= "PCIe2 MEM 2",
		.end	= 0xfcd00000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
/* Config-space accessors for this controller, defined elsewhere. */
extern struct pci_ops sh7786_pci_ops;

/*
 * Common pci_channel initializer for one port: 'idx' selects the matching
 * resource array above; 'start' is the port's register base (consumed by
 * initializer lines not visible in this view).
 */
#define DEFINE_CONTROLLER(start, idx)					\
	.pci_ops	= &sh7786_pci_ops,				\
	.resources	= sh7786_pci##idx##_resources,			\
	.nr_resources	= ARRAY_SIZE(sh7786_pci##idx##_resources),	\
/* One channel per PCIe port; the first argument is the register base. */
static struct pci_channel sh7786_pci_channels[] = {
	DEFINE_CONTROLLER(0xfe000000, 0),
	DEFINE_CONTROLLER(0xfe200000, 1),
	DEFINE_CONTROLLER(0xfcc00000, 2),
/*
 * Header fixup for the SH7786 root complex device: zero out all of its
 * resources so the PCI core does not try to enumerate or assign them.
 */
static void __devinit sh7786_pci_fixup(struct pci_dev *dev)
	/*
	 * Prevent enumeration of root complex resources.
	 */
	if (pci_is_root_bus(dev->bus) && dev->devfn == 0) {
		/* Clear every resource slot on the device. */
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start	= 0;
			dev->resource[i].end	= 0;
			dev->resource[i].flags	= 0;
/* Run the fixup at header probe time for the SH7786 vendor/device ID. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_SH7786,
/*
 * Spin until the PHY acknowledges the last indirect command (BITS_ACK
 * raised in PCIEPHYADRR), bounded by a fixed iteration count.
 */
static int phy_wait_for_ack(struct pci_channel *chan)
	unsigned int timeout = 100;	/* bounded poll; loop body partly elided here */
		if (pci_read_reg(chan, SH4A_PCIEPHYADRR) & (1 << BITS_ACK))
/*
 * Poll PCIEINTR until every bit in 'mask' is set, bounded by a fixed
 * iteration count.
 */
static int pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
	unsigned int timeout = 100;	/* bounded poll; loop body partly elided here */
		if ((pci_read_reg(chan, SH4A_PCIEINTR) & mask) == mask)
/*
 * Write a PHY register through the indirect PCIEPHYADRR/PCIEPHYDOUTR
 * interface.
 *
 * @addr: PHY register address (masked to 8 bits)
 * @lane: lane select bits (masked to 4 bits; 0xf selects all lanes)
 * @data: value to write
 */
static void phy_write_reg(struct pci_channel *chan, unsigned int addr,
			  unsigned int lane, unsigned int data)
	unsigned long phyaddr;

	/* Build the command word: write command + lane select + register address. */
	phyaddr = (1 << BITS_CMD) + ((lane & 0xf) << BITS_LANE) +
			((addr & 0xff) << BITS_ADR);

	/* Latch the data first, then kick the command. */
	pci_write_reg(chan, data, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);

	/* Return the command/data registers to an idle state. */
	pci_write_reg(chan, 0, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, 0, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);
/*
 * One-time PHY bring-up for a port: enable the PHY register clock,
 * program the tuning registers, release standby, then wait (bounded)
 * for the PHY status register to report ready.
 */
static int phy_init(struct pci_channel *chan)
	unsigned int timeout = 100;

	/* Enable the clock (CKE) so the PHY register file is accessible. */
	ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
	ctrl |= (1 << BITS_CKE);
	pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);

	/* Initialize the phy (opaque tuning values, presumably from the
	 * hardware manual -- do not change without the datasheet). */
	phy_write_reg(chan, 0x60, 0xf, 0x004b008b);
	phy_write_reg(chan, 0x61, 0xf, 0x00007b41);
	phy_write_reg(chan, 0x64, 0xf, 0x00ff4f00);
	phy_write_reg(chan, 0x65, 0xf, 0x09070907);
	phy_write_reg(chan, 0x66, 0xf, 0x00000010);
	phy_write_reg(chan, 0x74, 0xf, 0x0007001c);
	phy_write_reg(chan, 0x79, 0xf, 0x01fc000d);
	phy_write_reg(chan, 0xb0, 0xf, 0x00000610);

	/* Deassert Standby */
	phy_write_reg(chan, 0x67, 0x1, 0x00000400);

	/* Setup done; gate the PHY register clock again. */
	ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
	ctrl &= ~(1 << BITS_CKE);
	pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);

	/* Non-zero status means the PHY came up. */
		if (pci_read_reg(chan, SH4A_PCIEPHYSR))
/*
 * Pulse the port's soft reset and clear the transfer control and TX VC0
 * status registers back to a known state.
 */
static void pcie_reset(struct sh7786_pcie_port *port)
	struct pci_channel *chan = port->hose;

	pci_write_reg(chan, 1, SH4A_PCIESRSTR);		/* assert soft reset */
	pci_write_reg(chan, 0, SH4A_PCIETCTLR);		/* clear transfer control */
	pci_write_reg(chan, 0, SH4A_PCIESRSTR);		/* release soft reset */
	pci_write_reg(chan, 0, SH4A_PCIETXVC0SR);	/* clear TX VC0 status */
/*
 * Per-port controller initialization: program the config-space identity
 * and PCI Express capability registers, map local memory for inbound
 * access (LAR/LAMR), start the link, and finally program one outbound
 * address window per usable resource in the port's resource list.
 */
static int pcie_init(struct sh7786_pcie_port *port)
	struct pci_channel *chan = port->hose;

	/* Begin initialization */

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	pci_write_reg(chan, PCI_CLASS_BRIDGE_PCI << 16, SH4A_PCIEIDSETR1);

	/* Initialize default capabilities. */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP0);
	data &= ~(PCI_EXP_FLAGS_TYPE << 16);
	/* Device/port type field: endpoint vs. root port. */
		data |= PCI_EXP_TYPE_ENDPOINT << 20;
		data |= PCI_EXP_TYPE_ROOT_PORT << 20;
	data |= PCI_CAP_ID_EXP;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP0);

	/* Enable data link layer active state reporting */
	pci_write_reg(chan, PCI_EXP_LNKCAP_DLLLARC, SH4A_PCIEEXPCAP3);

	/* Enable extended sync and ASPM L0s support */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP4);
	data &= ~PCI_EXP_LNKCTL_ASPMC;
	data |= PCI_EXP_LNKCTL_ES | 1;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP4);

	/* Write out the physical slot number (1-based). */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP5);
	data &= ~PCI_EXP_SLTCAP_PSN;
	data |= (port->index + 1) << 19;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP5);

	/* Set the completion timer timeout to the maximum 32ms. */
	data = pci_read_reg(chan, SH4A_PCIETLCTLR);
	pci_write_reg(chan, data, SH4A_PCIETLCTLR);

	/*
	 * Set fast training sequences to the maximum 255,
	 * and enable MAC data scrambling.
	 */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data &= ~PCIEMACCTLR_SCR_DIS;
	data |= (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	/* Round the local memory size up to a power of two so it fits
	 * the window mask registers. */
	memphys = __pa(memory_start);
	memsize = roundup_pow_of_two(memory_end - memory_start);

	/*
	 * If there's more than 512MB of memory, we need to roll over to
	 * the second local address window (LAR1/LAMR1).
	 */
	if (memsize > SZ_512M) {
		__raw_writel(memphys + SZ_512M, chan->reg_base + SH4A_PCIELAR1);
		__raw_writel(((memsize - SZ_512M) - SZ_256) | 1,
			     chan->reg_base + SH4A_PCIELAMR1);

	/*
	 * Otherwise just zero it out and disable it.
	 */
	__raw_writel(0, chan->reg_base + SH4A_PCIELAR1);
	__raw_writel(0, chan->reg_base + SH4A_PCIELAMR1);

	/*
	 * LAR0/LAMR0 covers up to the first 512MB, which is enough to
	 * cover all of lowmem on most platforms.
	 */
	__raw_writel(memphys, chan->reg_base + SH4A_PCIELAR0);
	__raw_writel((memsize - SZ_256) | 1, chan->reg_base + SH4A_PCIELAMR0);

	/* Finish initialization */
	data = pci_read_reg(chan, SH4A_PCIETCTLR);
	pci_write_reg(chan, data, SH4A_PCIETCTLR);

	/* Let things settle down a bit.. */

	/* Enable DL_Active Interrupt generation */
	data = pci_read_reg(chan, SH4A_PCIEDLINTENR);
	data |= PCIEDLINTENR_DLL_ACT_ENABLE;
	pci_write_reg(chan, data, SH4A_PCIEDLINTENR);

	/* Disable MAC data scrambling. */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data |= PCIEMACCTLR_SCR_DIS | (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	/*
	 * This will timeout if we don't have a link, but we permit the
	 * port to register anyways in order to support hotplug on future
	 * hardware.
	 */
	ret = pci_wait_for_irq(chan, MASK_INT_TX_CTRL);

	/* Program the command/status words in PCICONF1: enable I/O,
	 * memory and bus mastering, advertise fast DEVSEL + cap list. */
	data = pci_read_reg(chan, SH4A_PCIEPCICONF1);
	data &= ~(PCI_STATUS_DEVSEL_MASK << 16);
	data |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		(PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST) << 16;
	pci_write_reg(chan, data, SH4A_PCIEPCICONF1);

	/* VC0 credit allocation -- opaque values; presumably from the
	 * hardware manual. */
	pci_write_reg(chan, 0x80888000, SH4A_PCIETXVC0DCTLR);
	pci_write_reg(chan, 0x00222000, SH4A_PCIERXVC0DCTLR);

	/* Report the negotiated link width from the MAC status register. */
	data = pci_read_reg(chan, SH4A_PCIEMACSR);
	printk(KERN_NOTICE "PCI: PCIe#%d x%d link detected\n",
	       port->index, (data >> 20) & 0x3f);
	printk(KERN_NOTICE "PCI: PCIe#%d link down\n",

	/* Program one outbound address window per usable resource. */
	for (i = win = 0; i < chan->nr_resources; i++) {
		struct resource *res = chan->resources + i;
		resource_size_t size;

		/*
		 * We can't use the 32-bit mode windows in legacy 29-bit
		 * mode, so just skip them entirely.
		 */
		if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode())

		/* Disable the window before reprogramming it. */
		pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win));

		size = resource_size(res);

		/*
		 * The PAMR mask is calculated in units of 256kB, which
		 * keeps things pretty simple.
		 */
		__raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
			     chan->reg_base + SH4A_PCIEPAMR(win));

		/* Window base address: low 32 bits only, high word zeroed. */
		pci_write_reg(chan, res->start, SH4A_PCIEPARL(win));
		pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH(win));

		/* Enable the window, flagging I/O space where appropriate. */
		enable_mask = MASK_PARE;
		if (res->flags & IORESOURCE_IO)
			enable_mask |= MASK_SPC;

		pci_write_reg(chan, enable_mask, SH4A_PCIEPTCTLR(win));
/*
 * Platform hook the PCI core calls to route a device/pin to an IRQ.
 * NOTE(review): the body is not visible in this view -- confirm the
 * mapping logic against the full file.
 */
int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
/*
 * Core probe: the number of available PCIe ports depends on the
 * MODE_PIN12 strap (3 ports when set, otherwise 2).
 */
static int sh7786_pcie_core_init(void)
	/* Return the number of ports */
	return test_mode_pin(MODE_PIN12) ? 3 : 2;
/*
 * Bring up a single port: initialize the PHY, latch the endpoint/root
 * complex mode strap, run the core init, and finally hand the channel
 * over to the generic PCI layer.  Returns a negative error code on
 * failure, otherwise the result of register_pci_controller().
 */
static int __devinit sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
	ret = phy_init(port->hose);
	if (unlikely(ret < 0))

	/*
	 * Check if we are configured in endpoint or root complex mode,
	 * this is a fixed pin setting that applies to all PCIe ports.
	 */
	port->endpoint = test_mode_pin(MODE_PIN11);

	ret = pcie_init(port);
	if (unlikely(ret < 0))

	return register_pci_controller(port->hose);
/* Hardware ops for the 65nm SH7786 parts. */
static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
	.core_init	= sh7786_pcie_core_init,
	.port_init_hw	= sh7786_pcie_init_hw,
/*
 * Top-level initcall: select the hwops, probe the port count, allocate
 * the per-port state, bind each port to its pci_channel and run the
 * per-port hardware bring-up.
 */
static int __init sh7786_pcie_init(void)
	printk(KERN_NOTICE "PCI: Starting initialization.\n");

	sh7786_pcie_hwops = &sh7786_65nm_pcie_hwops;

	/* Ask the silicon how many ports are present. */
	nr_ports = sh7786_pcie_hwops->core_init();
	BUG_ON(nr_ports > ARRAY_SIZE(sh7786_pci_channels));

	if (unlikely(nr_ports == 0))

	sh7786_pcie_ports = kzalloc(nr_ports * sizeof(struct sh7786_pcie_port),
	if (unlikely(!sh7786_pcie_ports))

	printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports);

	for (i = 0; i < nr_ports; i++) {
		struct sh7786_pcie_port *port = sh7786_pcie_ports + i;

		port->hose = sh7786_pci_channels + i;
		/* Default the I/O map base to the port's first (I/O) window. */
		port->hose->io_map_base = port->hose->resources[0].start;

		/* Accumulate errors; any port failure fails the initcall. */
		ret |= sh7786_pcie_hwops->port_init_hw(port);
arch_initcall(sh7786_pcie_init);