1 // SPDX-License-Identifier: GPL-2.0+
3 * PCIe host controller driver for Tegra194 SoC
5 * Copyright (C) 2019 NVIDIA Corporation.
7 * Author: Vidya Sagar <vidyas@nvidia.com>
10 #include <linux/clk.h>
11 #include <linux/debugfs.h>
12 #include <linux/delay.h>
13 #include <linux/gpio.h>
14 #include <linux/gpio/consumer.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
20 #include <linux/of_device.h>
21 #include <linux/of_gpio.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_pci.h>
24 #include <linux/pci.h>
25 #include <linux/pci-acpi.h>
26 #include <linux/pci-ecam.h>
27 #include <linux/phy/phy.h>
28 #include <linux/pinctrl/consumer.h>
29 #include <linux/platform_device.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/random.h>
32 #include <linux/reset.h>
33 #include <linux/resource.h>
34 #include <linux/types.h>
35 #include "pcie-designware.h"
36 #include <soc/tegra/bpmp.h>
37 #include <soc/tegra/bpmp-abi.h>
38 #include "../../pci.h"
40 #define APPL_PINMUX 0x0
41 #define APPL_PINMUX_PEX_RST BIT(0)
42 #define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
43 #define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
44 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
45 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
48 #define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
49 #define APPL_CTRL_LTSSM_EN BIT(7)
50 #define APPL_CTRL_HW_HOT_RST_EN BIT(20)
51 #define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
52 #define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
53 #define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
55 #define APPL_INTR_EN_L0_0 0x8
56 #define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
57 #define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
58 #define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
59 #define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
60 #define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
61 #define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
62 #define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
64 #define APPL_INTR_STATUS_L0 0xC
65 #define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
66 #define APPL_INTR_STATUS_L0_INT_INT BIT(8)
67 #define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
68 #define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
69 #define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
71 #define APPL_INTR_EN_L1_0_0 0x1C
72 #define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
73 #define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
74 #define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)
76 #define APPL_INTR_STATUS_L1_0_0 0x20
77 #define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
78 #define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
79 #define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)
81 #define APPL_INTR_STATUS_L1_1 0x2C
82 #define APPL_INTR_STATUS_L1_2 0x30
83 #define APPL_INTR_STATUS_L1_3 0x34
84 #define APPL_INTR_STATUS_L1_6 0x3C
85 #define APPL_INTR_STATUS_L1_7 0x40
86 #define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)
88 #define APPL_INTR_EN_L1_8_0 0x44
89 #define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
90 #define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3)
91 #define APPL_INTR_EN_L1_8_INTX_EN BIT(11)
92 #define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15)
94 #define APPL_INTR_STATUS_L1_8_0 0x4C
95 #define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6)
96 #define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2)
97 #define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3)
99 #define APPL_INTR_STATUS_L1_9 0x54
100 #define APPL_INTR_STATUS_L1_10 0x58
101 #define APPL_INTR_STATUS_L1_11 0x64
102 #define APPL_INTR_STATUS_L1_13 0x74
103 #define APPL_INTR_STATUS_L1_14 0x78
104 #define APPL_INTR_STATUS_L1_15 0x7C
105 #define APPL_INTR_STATUS_L1_17 0x88
107 #define APPL_INTR_EN_L1_18 0x90
108 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2)
109 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
110 #define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
112 #define APPL_INTR_STATUS_L1_18 0x94
113 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2)
114 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
115 #define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
117 #define APPL_MSI_CTRL_1 0xAC
119 #define APPL_MSI_CTRL_2 0xB0
121 #define APPL_LEGACY_INTX 0xB8
123 #define APPL_LTR_MSG_1 0xC4
124 #define LTR_MSG_REQ BIT(15)
125 #define LTR_MST_NO_SNOOP_SHIFT 16
127 #define APPL_LTR_MSG_2 0xC8
128 #define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3)
130 #define APPL_LINK_STATUS 0xCC
131 #define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0)
133 #define APPL_DEBUG 0xD0
134 #define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21)
135 #define APPL_DEBUG_PM_LINKST_IN_L0 0x11
136 #define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3)
137 #define APPL_DEBUG_LTSSM_STATE_SHIFT 3
138 #define LTSSM_STATE_PRE_DETECT 5
140 #define APPL_RADM_STATUS 0xE4
141 #define APPL_PM_XMT_TURNOFF_STATE BIT(0)
143 #define APPL_DM_TYPE 0x100
144 #define APPL_DM_TYPE_MASK GENMASK(3, 0)
145 #define APPL_DM_TYPE_RP 0x4
146 #define APPL_DM_TYPE_EP 0x0
148 #define APPL_CFG_BASE_ADDR 0x104
149 #define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12)
151 #define APPL_CFG_IATU_DMA_BASE_ADDR 0x108
152 #define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18)
154 #define APPL_CFG_MISC 0x110
155 #define APPL_CFG_MISC_SLV_EP_MODE BIT(14)
156 #define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10)
157 #define APPL_CFG_MISC_ARCACHE_SHIFT 10
158 #define APPL_CFG_MISC_ARCACHE_VAL 3
160 #define APPL_CFG_SLCG_OVERRIDE 0x114
161 #define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0)
163 #define APPL_CAR_RESET_OVRD 0x12C
164 #define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0)
166 #define IO_BASE_IO_DECODE BIT(0)
167 #define IO_BASE_IO_DECODE_BIT8 BIT(8)
169 #define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0)
170 #define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16)
172 #define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
173 #define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
175 #define EVENT_COUNTER_ALL_CLEAR 0x3
176 #define EVENT_COUNTER_ENABLE_ALL 0x7
177 #define EVENT_COUNTER_ENABLE_SHIFT 2
178 #define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
179 #define EVENT_COUNTER_EVENT_SEL_SHIFT 16
180 #define EVENT_COUNTER_EVENT_Tx_L0S 0x2
181 #define EVENT_COUNTER_EVENT_Rx_L0S 0x3
182 #define EVENT_COUNTER_EVENT_L1 0x5
183 #define EVENT_COUNTER_EVENT_L1_1 0x7
184 #define EVENT_COUNTER_EVENT_L1_2 0x8
185 #define EVENT_COUNTER_GROUP_SEL_SHIFT 24
186 #define EVENT_COUNTER_GROUP_5 0x5
191 #define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
193 #define GEN3_EQ_CONTROL_OFF 0x8a8
194 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
195 #define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
196 #define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
198 #define GEN3_RELATED_OFF 0x890
199 #define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
200 #define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
201 #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
202 #define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
204 #define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
205 #define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
206 #define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
207 #define AMBA_ERROR_RESPONSE_CRS_OKAY 0
208 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
209 #define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
211 #define MSIX_ADDR_MATCH_LOW_OFF 0x940
212 #define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
213 #define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)
215 #define MSIX_ADDR_MATCH_HIGH_OFF 0x944
216 #define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)
218 #define PORT_LOGIC_MSIX_DOORBELL 0x948
220 #define CAP_SPCIE_CAP_OFF 0x154
221 #define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0)
222 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8)
223 #define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8
225 #define PME_ACK_TIMEOUT 10000
227 #define LTSSM_TIMEOUT 50000 /* 50ms */
229 #define GEN3_GEN4_EQ_PRESET_INIT 5
231 #define GEN1_CORE_CLK_FREQ 62500000
232 #define GEN2_CORE_CLK_FREQ 125000000
233 #define GEN3_CORE_CLK_FREQ 250000000
234 #define GEN4_CORE_CLK_FREQ 500000000
236 #define LTR_MSG_TIMEOUT (100 * 1000)
238 #define PERST_DEBOUNCE_TIME (5 * 1000)
240 #define EP_STATE_DISABLED 0
241 #define EP_STATE_ENABLED 1
243 static const unsigned int pcie_gen_freq[] = {
250 static const u32 event_cntr_ctrl_offset[] = {
259 static const u32 event_cntr_data_offset[] = {
268 struct tegra_pcie_dw {
270 struct resource *appl_res;
271 struct resource *dbi_res;
272 struct resource *atu_dma_res;
273 void __iomem *appl_base;
274 struct clk *core_clk;
275 struct reset_control *core_apb_rst;
276 struct reset_control *core_rst;
278 struct tegra_bpmp *bpmp;
280 enum dw_pcie_device_mode mode;
282 bool supports_clkreq;
283 bool enable_cdm_check;
285 bool update_fc_fixup;
290 u32 cfg_link_cap_l1sub;
294 u32 aspm_l0s_enter_lat;
296 struct regulator *pex_ctl_supply;
297 struct regulator *slot_ctl_3v3;
298 struct regulator *slot_ctl_12v;
300 unsigned int phy_count;
303 struct dentry *debugfs;
305 /* Endpoint mode specific */
306 struct gpio_desc *pex_rst_gpiod;
307 struct gpio_desc *pex_refclk_sel_gpiod;
308 unsigned int pex_rst_irq;
312 struct tegra_pcie_dw_of_data {
313 enum dw_pcie_device_mode mode;
316 #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
317 struct tegra194_pcie_ecam {
318 void __iomem *config_base;
319 void __iomem *iatu_base;
320 void __iomem *dbi_base;
323 static int tegra194_acpi_init(struct pci_config_window *cfg)
325 struct device *dev = cfg->parent;
326 struct tegra194_pcie_ecam *pcie_ecam;
328 pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
332 pcie_ecam->config_base = cfg->win;
333 pcie_ecam->iatu_base = cfg->win + SZ_256K;
334 pcie_ecam->dbi_base = cfg->win + SZ_512K;
335 cfg->priv = pcie_ecam;
340 static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
343 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
345 writel(val, pcie_ecam->iatu_base + offset + reg);
348 static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
349 int index, int type, u64 cpu_addr,
350 u64 pci_addr, u64 size)
352 atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
353 PCIE_ATU_LOWER_BASE);
354 atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
355 PCIE_ATU_UPPER_BASE);
356 atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
357 PCIE_ATU_LOWER_TARGET);
358 atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
360 atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
361 PCIE_ATU_UPPER_TARGET);
362 atu_reg_write(pcie_ecam, index, type, PCIE_ATU_CR1);
363 atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
366 static void __iomem *tegra194_map_bus(struct pci_bus *bus,
367 unsigned int devfn, int where)
369 struct pci_config_window *cfg = bus->sysdata;
370 struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
374 if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
377 if (bus->number == cfg->busr.start) {
378 if (PCI_SLOT(devfn) == 0)
379 return pcie_ecam->dbi_base + where;
384 busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
385 PCIE_ATU_FUNC(PCI_FUNC(devfn));
387 if (bus->parent->number == cfg->busr.start) {
388 if (PCI_SLOT(devfn) == 0)
389 type = PCIE_ATU_TYPE_CFG0;
393 type = PCIE_ATU_TYPE_CFG1;
396 program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
399 return pcie_ecam->config_base + where;
402 const struct pci_ecam_ops tegra194_pcie_ops = {
403 .init = tegra194_acpi_init,
405 .map_bus = tegra194_map_bus,
406 .read = pci_generic_config_read,
407 .write = pci_generic_config_write,
410 #endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
412 #ifdef CONFIG_PCIE_TEGRA194
414 static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
416 return container_of(pci, struct tegra_pcie_dw, pci);
419 static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
422 writel_relaxed(value, pcie->appl_base + reg);
425 static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
427 return readl_relaxed(pcie->appl_base + reg);
430 struct tegra_pcie_soc {
431 enum dw_pcie_device_mode mode;
434 static void apply_bad_link_workaround(struct pcie_port *pp)
436 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
437 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
438 u32 current_link_width;
442 * NOTE:- Since this scenario is uncommon and link as such is not
443 * stable anyway, not waiting to confirm if link is really
444 * transitioning to Gen-2 speed
446 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
447 if (val & PCI_EXP_LNKSTA_LBMS) {
448 current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
449 PCI_EXP_LNKSTA_NLW_SHIFT;
450 if (pcie->init_link_width > current_link_width) {
451 dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
452 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
454 val &= ~PCI_EXP_LNKCTL2_TLS;
455 val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
456 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
457 PCI_EXP_LNKCTL2, val);
459 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
461 val |= PCI_EXP_LNKCTL_RL;
462 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
463 PCI_EXP_LNKCTL, val);
468 static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
470 struct tegra_pcie_dw *pcie = arg;
471 struct dw_pcie *pci = &pcie->pci;
472 struct pcie_port *pp = &pci->pp;
476 val = appl_readl(pcie, APPL_INTR_STATUS_L0);
477 if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
478 val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
479 if (val & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
480 appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
482 /* SBR & Surprise Link Down WAR */
483 val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
484 val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
485 appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
487 val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
488 val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
489 appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
491 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
492 val |= PORT_LOGIC_SPEED_CHANGE;
493 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
497 if (val & APPL_INTR_STATUS_L0_INT_INT) {
498 val = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
499 if (val & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
501 APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
502 APPL_INTR_STATUS_L1_8_0);
503 apply_bad_link_workaround(pp);
505 if (val & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
507 APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
508 APPL_INTR_STATUS_L1_8_0);
510 val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
512 dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
517 val = appl_readl(pcie, APPL_INTR_STATUS_L0);
518 if (val & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
519 val = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
520 tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
521 if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
522 dev_info(pci->dev, "CDM check complete\n");
523 tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
525 if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
526 dev_err(pci->dev, "CDM comparison mismatch\n");
527 tmp |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
529 if (val & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
530 dev_err(pci->dev, "CDM Logic error\n");
531 tmp |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
533 dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, tmp);
534 tmp = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
535 dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", tmp);
541 static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
545 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
546 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
547 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
548 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
549 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
550 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
551 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
552 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
553 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
554 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
555 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
556 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
557 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
558 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
559 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
560 appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
562 val = appl_readl(pcie, APPL_CTRL);
563 val |= APPL_CTRL_LTSSM_EN;
564 appl_writel(pcie, val, APPL_CTRL);
567 static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
569 struct tegra_pcie_dw *pcie = arg;
570 struct dw_pcie *pci = &pcie->pci;
573 speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
575 clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
577 /* If EP doesn't advertise L1SS, just return */
578 val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
579 if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
582 /* Check if BME is set to '1' */
583 val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
584 if (val & PCI_COMMAND_MASTER) {
587 /* 110us for both snoop and no-snoop */
588 val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
589 val |= (val << LTR_MST_NO_SNOOP_SHIFT);
590 appl_writel(pcie, val, APPL_LTR_MSG_1);
592 /* Send LTR upstream */
593 val = appl_readl(pcie, APPL_LTR_MSG_2);
594 val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
595 appl_writel(pcie, val, APPL_LTR_MSG_2);
597 timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
599 val = appl_readl(pcie, APPL_LTR_MSG_2);
600 if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
602 if (ktime_after(ktime_get(), timeout))
604 usleep_range(1000, 1100);
606 if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
607 dev_err(pcie->dev, "Failed to send LTR message\n");
613 static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
615 struct tegra_pcie_dw *pcie = arg;
616 struct dw_pcie_ep *ep = &pcie->pci.ep;
620 val = appl_readl(pcie, APPL_INTR_STATUS_L0);
621 if (val & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
622 val = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
623 appl_writel(pcie, val, APPL_INTR_STATUS_L1_0_0);
625 if (val & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
626 pex_ep_event_hot_rst_done(pcie);
628 if (val & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
629 tmp = appl_readl(pcie, APPL_LINK_STATUS);
630 if (tmp & APPL_LINK_STATUS_RDLH_LINK_UP) {
631 dev_dbg(pcie->dev, "Link is up with Host\n");
632 dw_pcie_ep_linkup(ep);
639 if (val & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
640 val = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
641 appl_writel(pcie, val, APPL_INTR_STATUS_L1_15);
643 if (val & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
644 return IRQ_WAKE_THREAD;
650 dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
652 appl_writel(pcie, val, APPL_INTR_STATUS_L0);
658 static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
662 * This is an endpoint mode specific register happen to appear even
663 * when controller is operating in root port mode and system hangs
664 * when it is accessed with link being in ASPM-L1 state.
665 * So skip accessing it altogether
667 if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
669 return PCIBIOS_SUCCESSFUL;
672 return pci_generic_config_read(bus, devfn, where, size, val);
675 static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
679 * This is an endpoint mode specific register happen to appear even
680 * when controller is operating in root port mode and system hangs
681 * when it is accessed with link being in ASPM-L1 state.
682 * So skip accessing it altogether
684 if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
685 return PCIBIOS_SUCCESSFUL;
687 return pci_generic_config_write(bus, devfn, where, size, val);
690 static struct pci_ops tegra_pci_ops = {
691 .map_bus = dw_pcie_own_conf_map_bus,
692 .read = tegra_pcie_dw_rd_own_conf,
693 .write = tegra_pcie_dw_wr_own_conf,
696 #if defined(CONFIG_PCIEASPM)
697 static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
701 val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
702 val &= ~PCI_L1SS_CAP_ASPM_L1_1;
703 dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
706 static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
710 val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
711 val &= ~PCI_L1SS_CAP_ASPM_L1_2;
712 dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
715 static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
719 val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
720 val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
721 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
722 val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
723 val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
724 dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
725 val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);
730 static int aspm_state_cnt(struct seq_file *s, void *data)
732 struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
733 dev_get_drvdata(s->private);
736 seq_printf(s, "Tx L0s entry count : %u\n",
737 event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
739 seq_printf(s, "Rx L0s entry count : %u\n",
740 event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
742 seq_printf(s, "Link L1 entry count : %u\n",
743 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
745 seq_printf(s, "Link L1.1 entry count : %u\n",
746 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
748 seq_printf(s, "Link L1.2 entry count : %u\n",
749 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
751 /* Clear all counters */
752 dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
753 EVENT_COUNTER_ALL_CLEAR);
755 /* Re-enable counting */
756 val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
757 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
758 dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
763 static void init_host_aspm(struct tegra_pcie_dw *pcie)
765 struct dw_pcie *pci = &pcie->pci;
768 val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
769 pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
771 /* Enable ASPM counters */
772 val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
773 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
774 dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);
776 /* Program T_cmrt and T_pwr_on values */
777 val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
778 val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
779 val |= (pcie->aspm_cmrt << 8);
780 val |= (pcie->aspm_pwr_on_t << 19);
781 dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
783 /* Program L0s and L1 entrance latencies */
784 val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
785 val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
786 val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
787 val |= PORT_AFR_ENTER_ASPM;
788 dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
791 static void init_debugfs(struct tegra_pcie_dw *pcie)
793 debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
/* CONFIG_PCIEASPM disabled: ASPM tuning and its debugfs view are no-ops. */
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { }
803 static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
805 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
806 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
810 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
811 val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
812 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
814 val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
815 val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
816 appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
818 if (pcie->enable_cdm_check) {
819 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
820 val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
821 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
823 val = appl_readl(pcie, APPL_INTR_EN_L1_18);
824 val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
825 val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
826 appl_writel(pcie, val, APPL_INTR_EN_L1_18);
829 val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
831 pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
832 PCI_EXP_LNKSTA_NLW_SHIFT;
834 val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
836 val_w |= PCI_EXP_LNKCTL_LBMIE;
837 dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
841 static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
843 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
844 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
847 /* Enable legacy interrupt generation */
848 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
849 val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
850 val |= APPL_INTR_EN_L0_0_INT_INT_EN;
851 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
853 val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
854 val |= APPL_INTR_EN_L1_8_INTX_EN;
855 val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
856 val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
857 if (IS_ENABLED(CONFIG_PCIEAER))
858 val |= APPL_INTR_EN_L1_8_AER_INT_EN;
859 appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
862 static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
864 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
865 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
868 /* Enable MSI interrupt generation */
869 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
870 val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
871 val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
872 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
875 static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
877 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
878 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
880 /* Clear interrupt statuses before enabling interrupts */
881 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
882 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
883 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
884 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
885 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
886 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
887 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
888 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
889 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
890 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
891 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
892 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
893 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
894 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
895 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
897 tegra_pcie_enable_system_interrupts(pp);
898 tegra_pcie_enable_legacy_interrupts(pp);
899 if (IS_ENABLED(CONFIG_PCI_MSI))
900 tegra_pcie_enable_msi_interrupts(pp);
903 static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
905 struct dw_pcie *pci = &pcie->pci;
908 /* Program init preset */
909 for (i = 0; i < pcie->num_lanes; i++) {
910 val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
911 val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
912 val |= GEN3_GEN4_EQ_PRESET_INIT;
913 val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
914 val |= (GEN3_GEN4_EQ_PRESET_INIT <<
915 CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
916 dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
918 offset = dw_pcie_find_ext_capability(pci,
919 PCI_EXT_CAP_ID_PL_16GT) +
921 val = dw_pcie_readb_dbi(pci, offset + i);
922 val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
923 val |= GEN3_GEN4_EQ_PRESET_INIT;
924 val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
925 val |= (GEN3_GEN4_EQ_PRESET_INIT <<
926 PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
927 dw_pcie_writeb_dbi(pci, offset + i, val);
930 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
931 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
932 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
934 val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
935 val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
936 val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
937 val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
938 dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
940 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
941 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
942 val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
943 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
945 val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
946 val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
947 val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
948 val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
949 dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
951 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
952 val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
953 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
956 static int tegra_pcie_dw_host_init(struct pcie_port *pp)
958 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
959 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
962 pp->bridge->ops = &tegra_pci_ops;
964 if (!pcie->pcie_cap_base)
965 pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
968 val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
969 val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
970 dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
972 val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
973 val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
974 val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
975 dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
977 dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
979 /* Enable as 0xFFFF0001 response for CRS */
980 val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
981 val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
982 val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
983 AMBA_ERROR_RESPONSE_CRS_SHIFT);
984 dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
986 /* Configure Max lane width from DT */
987 val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
988 val &= ~PCI_EXP_LNKCAP_MLW;
989 val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
990 dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
992 config_gen3_gen4_eq_presets(pcie);
994 init_host_aspm(pcie);
996 /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
997 if (!pcie->supports_clkreq) {
998 disable_aspm_l11(pcie);
999 disable_aspm_l12(pcie);
1002 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
1003 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
1004 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
1006 if (pcie->update_fc_fixup) {
1007 val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
1008 val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
1009 dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
1012 clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
1017 static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
1019 u32 val, offset, speed, tmp;
1020 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1021 struct pcie_port *pp = &pci->pp;
1024 if (pcie->mode == DW_PCIE_EP_TYPE) {
1025 enable_irq(pcie->pex_rst_irq);
1031 val = appl_readl(pcie, APPL_PINMUX);
1032 val &= ~APPL_PINMUX_PEX_RST;
1033 appl_writel(pcie, val, APPL_PINMUX);
1035 usleep_range(100, 200);
1038 val = appl_readl(pcie, APPL_CTRL);
1039 val |= APPL_CTRL_LTSSM_EN;
1040 appl_writel(pcie, val, APPL_CTRL);
1043 val = appl_readl(pcie, APPL_PINMUX);
1044 val |= APPL_PINMUX_PEX_RST;
1045 appl_writel(pcie, val, APPL_PINMUX);
1049 if (dw_pcie_wait_for_link(pci)) {
1053 * There are some endpoints which can't get the link up if
1054 * root port has Data Link Feature (DLF) enabled.
1055 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info
1056 * on Scaled Flow Control and DLF.
1057 * So, need to confirm that is indeed the case here and attempt
1058 * link up once again with DLF disabled.
1060 val = appl_readl(pcie, APPL_DEBUG);
1061 val &= APPL_DEBUG_LTSSM_STATE_MASK;
1062 val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
1063 tmp = appl_readl(pcie, APPL_LINK_STATUS);
1064 tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
1065 if (!(val == 0x11 && !tmp)) {
1066 /* Link is down for all good reasons */
1070 dev_info(pci->dev, "Link is down in DLL");
1071 dev_info(pci->dev, "Trying again with DLFE disabled\n");
1073 val = appl_readl(pcie, APPL_CTRL);
1074 val &= ~APPL_CTRL_LTSSM_EN;
1075 appl_writel(pcie, val, APPL_CTRL);
1077 reset_control_assert(pcie->core_rst);
1078 reset_control_deassert(pcie->core_rst);
1080 offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
1081 val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
1082 val &= ~PCI_DLF_EXCHANGE_ENABLE;
1083 dw_pcie_writel_dbi(pci, offset, val);
1085 tegra_pcie_dw_host_init(pp);
1086 dw_pcie_setup_rc(pp);
1092 speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
1094 clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
1096 tegra_pcie_enable_interrupts(pp);
1101 static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
1103 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1104 u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
1106 return !!(val & PCI_EXP_LNKSTA_DLLLA);
1109 static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
1111 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1113 disable_irq(pcie->pex_rst_irq);
1116 static const struct dw_pcie_ops tegra_dw_pcie_ops = {
1117 .link_up = tegra_pcie_dw_link_up,
1118 .start_link = tegra_pcie_dw_start_link,
1119 .stop_link = tegra_pcie_dw_stop_link,
1122 static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
1123 .host_init = tegra_pcie_dw_host_init,
1126 static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
1128 unsigned int phy_count = pcie->phy_count;
1130 while (phy_count--) {
1131 phy_power_off(pcie->phys[phy_count]);
1132 phy_exit(pcie->phys[phy_count]);
1136 static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
1141 for (i = 0; i < pcie->phy_count; i++) {
1142 ret = phy_init(pcie->phys[i]);
1146 ret = phy_power_on(pcie->phys[i]);
1155 phy_power_off(pcie->phys[i]);
1157 phy_exit(pcie->phys[i]);
1163 static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
1165 struct platform_device *pdev = to_platform_device(pcie->dev);
1166 struct device_node *np = pcie->dev->of_node;
1169 pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1170 if (!pcie->dbi_res) {
1171 dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
1175 ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
1177 dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
1181 ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
1182 &pcie->aspm_pwr_on_t);
1184 dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
1187 ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
1188 &pcie->aspm_l0s_enter_lat);
1191 "Failed to read ASPM L0s Entrance latency: %d\n", ret);
1193 ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
1195 dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
1199 ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
1201 dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
1205 ret = of_property_count_strings(np, "phy-names");
1207 dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
1211 pcie->phy_count = ret;
1213 if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
1214 pcie->update_fc_fixup = true;
1216 pcie->supports_clkreq =
1217 of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
1219 pcie->enable_cdm_check =
1220 of_property_read_bool(np, "snps,enable-cdm-check");
1222 if (pcie->mode == DW_PCIE_RC_TYPE)
1225 /* Endpoint mode specific DT entries */
1226 pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
1227 if (IS_ERR(pcie->pex_rst_gpiod)) {
1228 int err = PTR_ERR(pcie->pex_rst_gpiod);
1229 const char *level = KERN_ERR;
1231 if (err == -EPROBE_DEFER)
1234 dev_printk(level, pcie->dev,
1235 dev_fmt("Failed to get PERST GPIO: %d\n"),
1240 pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
1241 "nvidia,refclk-select",
1243 if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
1244 int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
1245 const char *level = KERN_ERR;
1247 if (err == -EPROBE_DEFER)
1250 dev_printk(level, pcie->dev,
1251 dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
1253 pcie->pex_refclk_sel_gpiod = NULL;
1259 static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
1262 struct mrq_uphy_response resp;
1263 struct tegra_bpmp_message msg;
1264 struct mrq_uphy_request req;
1266 /* Controller-5 doesn't need to have its state set by BPMP-FW */
1270 memset(&req, 0, sizeof(req));
1271 memset(&resp, 0, sizeof(resp));
1273 req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
1274 req.controller_state.pcie_controller = pcie->cid;
1275 req.controller_state.enable = enable;
1277 memset(&msg, 0, sizeof(msg));
1280 msg.tx.size = sizeof(req);
1281 msg.rx.data = &resp;
1282 msg.rx.size = sizeof(resp);
1284 return tegra_bpmp_transfer(pcie->bpmp, &msg);
1287 static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
1290 struct mrq_uphy_response resp;
1291 struct tegra_bpmp_message msg;
1292 struct mrq_uphy_request req;
1294 memset(&req, 0, sizeof(req));
1295 memset(&resp, 0, sizeof(resp));
1298 req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
1299 req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
1301 req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
1302 req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
1305 memset(&msg, 0, sizeof(msg));
1308 msg.tx.size = sizeof(req);
1309 msg.rx.data = &resp;
1310 msg.rx.size = sizeof(resp);
1312 return tegra_bpmp_transfer(pcie->bpmp, &msg);
1315 static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
1317 struct pcie_port *pp = &pcie->pci.pp;
1318 struct pci_bus *child, *root_bus = NULL;
1319 struct pci_dev *pdev;
1322 * link doesn't go into L2 state with some of the endpoints with Tegra
1323 * if they are not in D0 state. So, need to make sure that immediate
1324 * downstream devices are in D0 state before sending PME_TurnOff to put
1325 * link into L2 state.
1326 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
1327 * 5.2 Link State Power Management (Page #428).
1330 list_for_each_entry(child, &pp->bridge->bus->children, node) {
1331 /* Bring downstream devices to D0 if they are not already in */
1332 if (child->parent == pp->bridge->bus) {
1339 dev_err(pcie->dev, "Failed to find downstream devices\n");
1343 list_for_each_entry(pdev, &root_bus->devices, bus_list) {
1344 if (PCI_SLOT(pdev->devfn) == 0) {
1345 if (pci_set_power_state(pdev, PCI_D0))
1347 "Failed to transition %s to D0 state\n",
1348 dev_name(&pdev->dev));
1353 static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
1355 pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
1356 if (IS_ERR(pcie->slot_ctl_3v3)) {
1357 if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
1358 return PTR_ERR(pcie->slot_ctl_3v3);
1360 pcie->slot_ctl_3v3 = NULL;
1363 pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
1364 if (IS_ERR(pcie->slot_ctl_12v)) {
1365 if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
1366 return PTR_ERR(pcie->slot_ctl_12v);
1368 pcie->slot_ctl_12v = NULL;
1374 static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
1378 if (pcie->slot_ctl_3v3) {
1379 ret = regulator_enable(pcie->slot_ctl_3v3);
1382 "Failed to enable 3.3V slot supply: %d\n", ret);
1387 if (pcie->slot_ctl_12v) {
1388 ret = regulator_enable(pcie->slot_ctl_12v);
1391 "Failed to enable 12V slot supply: %d\n", ret);
1392 goto fail_12v_enable;
1397 * According to PCI Express Card Electromechanical Specification
1398 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
1399 * should be a minimum of 100ms.
1401 if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
1407 if (pcie->slot_ctl_3v3)
1408 regulator_disable(pcie->slot_ctl_3v3);
1412 static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
1414 if (pcie->slot_ctl_12v)
1415 regulator_disable(pcie->slot_ctl_12v);
1416 if (pcie->slot_ctl_3v3)
1417 regulator_disable(pcie->slot_ctl_3v3);
1420 static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
1426 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
1429 "Failed to enable controller %u: %d\n", pcie->cid, ret);
1433 ret = tegra_pcie_enable_slot_regulators(pcie);
1435 goto fail_slot_reg_en;
1437 ret = regulator_enable(pcie->pex_ctl_supply);
1439 dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
1443 ret = clk_prepare_enable(pcie->core_clk);
1445 dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
1449 ret = reset_control_deassert(pcie->core_apb_rst);
1451 dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
1453 goto fail_core_apb_rst;
1456 if (en_hw_hot_rst) {
1457 /* Enable HW_HOT_RST mode */
1458 val = appl_readl(pcie, APPL_CTRL);
1459 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
1460 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
1461 val |= APPL_CTRL_HW_HOT_RST_EN;
1462 appl_writel(pcie, val, APPL_CTRL);
1465 ret = tegra_pcie_enable_phy(pcie);
1467 dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
1471 /* Update CFG base address */
1472 appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
1473 APPL_CFG_BASE_ADDR);
1475 /* Configure this core for RP mode operation */
1476 appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
1478 appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
1480 val = appl_readl(pcie, APPL_CTRL);
1481 appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
1483 val = appl_readl(pcie, APPL_CFG_MISC);
1484 val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
1485 appl_writel(pcie, val, APPL_CFG_MISC);
1487 if (!pcie->supports_clkreq) {
1488 val = appl_readl(pcie, APPL_PINMUX);
1489 val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
1490 val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
1491 appl_writel(pcie, val, APPL_PINMUX);
1494 /* Update iATU_DMA base address */
1496 pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
1497 APPL_CFG_IATU_DMA_BASE_ADDR);
1499 reset_control_deassert(pcie->core_rst);
1504 reset_control_assert(pcie->core_apb_rst);
1506 clk_disable_unprepare(pcie->core_clk);
1508 regulator_disable(pcie->pex_ctl_supply);
1510 tegra_pcie_disable_slot_regulators(pcie);
1512 tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1517 static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
1521 ret = reset_control_assert(pcie->core_rst);
1523 dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
1525 tegra_pcie_disable_phy(pcie);
1527 ret = reset_control_assert(pcie->core_apb_rst);
1529 dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
1531 clk_disable_unprepare(pcie->core_clk);
1533 ret = regulator_disable(pcie->pex_ctl_supply);
1535 dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
1537 tegra_pcie_disable_slot_regulators(pcie);
1539 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1541 dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
1545 static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
1547 struct dw_pcie *pci = &pcie->pci;
1548 struct pcie_port *pp = &pci->pp;
1551 ret = tegra_pcie_config_controller(pcie, false);
1555 pp->ops = &tegra_pcie_dw_host_ops;
1557 ret = dw_pcie_host_init(pp);
1559 dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
1560 goto fail_host_init;
1566 tegra_pcie_unconfig_controller(pcie);
1570 static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
1574 if (!tegra_pcie_dw_link_up(&pcie->pci))
1577 val = appl_readl(pcie, APPL_RADM_STATUS);
1578 val |= APPL_PM_XMT_TURNOFF_STATE;
1579 appl_writel(pcie, val, APPL_RADM_STATUS);
1581 return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
1582 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
1583 1, PME_ACK_TIMEOUT);
1586 static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
1591 if (!tegra_pcie_dw_link_up(&pcie->pci)) {
1592 dev_dbg(pcie->dev, "PCIe link is not up...!\n");
1596 if (tegra_pcie_try_link_l2(pcie)) {
1597 dev_info(pcie->dev, "Link didn't transition to L2 state\n");
1599 * TX lane clock freq will reset to Gen1 only if link is in L2
1601 * So apply pex_rst to end point to force RP to go into detect
1604 data = appl_readl(pcie, APPL_PINMUX);
1605 data &= ~APPL_PINMUX_PEX_RST;
1606 appl_writel(pcie, data, APPL_PINMUX);
1609 * Some cards do not go to detect state even after de-asserting
1610 * PERST#. So, de-assert LTSSM to bring link to detect state.
1612 data = readl(pcie->appl_base + APPL_CTRL);
1613 data &= ~APPL_CTRL_LTSSM_EN;
1614 writel(data, pcie->appl_base + APPL_CTRL);
1616 err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
1619 APPL_DEBUG_LTSSM_STATE_MASK) >>
1620 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1621 LTSSM_STATE_PRE_DETECT,
1624 dev_info(pcie->dev, "Link didn't go to detect state\n");
1627 * DBI registers may not be accessible after this as PLL-E would be
1628 * down depending on how CLKREQ is pulled by end point
1630 data = appl_readl(pcie, APPL_PINMUX);
1631 data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
1632 /* Cut REFCLK to slot */
1633 data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1634 data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1635 appl_writel(pcie, data, APPL_PINMUX);
1638 static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
1640 tegra_pcie_downstream_dev_to_D0(pcie);
1641 dw_pcie_host_deinit(&pcie->pci.pp);
1642 tegra_pcie_dw_pme_turnoff(pcie);
1643 tegra_pcie_unconfig_controller(pcie);
1646 static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
1648 struct device *dev = pcie->dev;
1652 pm_runtime_enable(dev);
1654 ret = pm_runtime_get_sync(dev);
1656 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1658 goto fail_pm_get_sync;
1661 ret = pinctrl_pm_select_default_state(dev);
1663 dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
1664 goto fail_pm_get_sync;
1667 ret = tegra_pcie_init_controller(pcie);
1669 dev_err(dev, "Failed to initialize controller: %d\n", ret);
1670 goto fail_pm_get_sync;
1673 pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
1674 if (!pcie->link_state) {
1676 goto fail_host_init;
1679 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
1682 goto fail_host_init;
1685 pcie->debugfs = debugfs_create_dir(name, NULL);
1691 tegra_pcie_deinit_controller(pcie);
1693 pm_runtime_put_sync(dev);
1694 pm_runtime_disable(dev);
1698 static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
1703 if (pcie->ep_state == EP_STATE_DISABLED)
1707 val = appl_readl(pcie, APPL_CTRL);
1708 val &= ~APPL_CTRL_LTSSM_EN;
1709 appl_writel(pcie, val, APPL_CTRL);
1711 ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
1712 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
1713 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1714 LTSSM_STATE_PRE_DETECT,
1717 dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
1719 reset_control_assert(pcie->core_rst);
1721 tegra_pcie_disable_phy(pcie);
1723 reset_control_assert(pcie->core_apb_rst);
1725 clk_disable_unprepare(pcie->core_clk);
1727 pm_runtime_put_sync(pcie->dev);
1729 ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
1731 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
1733 pcie->ep_state = EP_STATE_DISABLED;
1734 dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
1737 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
1739 struct dw_pcie *pci = &pcie->pci;
1740 struct dw_pcie_ep *ep = &pci->ep;
1741 struct device *dev = pcie->dev;
1745 if (pcie->ep_state == EP_STATE_ENABLED)
1748 ret = pm_runtime_resume_and_get(dev);
1750 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1755 ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
1757 dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", ret);
1761 ret = clk_prepare_enable(pcie->core_clk);
1763 dev_err(dev, "Failed to enable core clock: %d\n", ret);
1764 goto fail_core_clk_enable;
1767 ret = reset_control_deassert(pcie->core_apb_rst);
1769 dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
1770 goto fail_core_apb_rst;
1773 ret = tegra_pcie_enable_phy(pcie);
1775 dev_err(dev, "Failed to enable PHY: %d\n", ret);
1779 /* Clear any stale interrupt statuses */
1780 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
1781 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
1782 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
1783 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
1784 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
1785 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
1786 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
1787 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
1788 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
1789 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
1790 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
1791 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
1792 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
1793 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
1794 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
1796 /* configure this core for EP mode operation */
1797 val = appl_readl(pcie, APPL_DM_TYPE);
1798 val &= ~APPL_DM_TYPE_MASK;
1799 val |= APPL_DM_TYPE_EP;
1800 appl_writel(pcie, val, APPL_DM_TYPE);
1802 appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
1804 val = appl_readl(pcie, APPL_CTRL);
1805 val |= APPL_CTRL_SYS_PRE_DET_STATE;
1806 val |= APPL_CTRL_HW_HOT_RST_EN;
1807 appl_writel(pcie, val, APPL_CTRL);
1809 val = appl_readl(pcie, APPL_CFG_MISC);
1810 val |= APPL_CFG_MISC_SLV_EP_MODE;
1811 val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
1812 appl_writel(pcie, val, APPL_CFG_MISC);
1814 val = appl_readl(pcie, APPL_PINMUX);
1815 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1816 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1817 appl_writel(pcie, val, APPL_PINMUX);
1819 appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
1820 APPL_CFG_BASE_ADDR);
1822 appl_writel(pcie, pcie->atu_dma_res->start &
1823 APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
1824 APPL_CFG_IATU_DMA_BASE_ADDR);
1826 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
1827 val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
1828 val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
1829 val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
1830 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
1832 val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
1833 val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
1834 val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
1835 appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
1837 reset_control_deassert(pcie->core_rst);
1839 if (pcie->update_fc_fixup) {
1840 val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
1841 val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
1842 dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
1845 config_gen3_gen4_eq_presets(pcie);
1847 init_host_aspm(pcie);
1849 /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
1850 if (!pcie->supports_clkreq) {
1851 disable_aspm_l11(pcie);
1852 disable_aspm_l12(pcie);
1855 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
1856 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
1857 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
1859 pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
1861 clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
1863 val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
1864 val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
1865 dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
1866 val = (lower_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
1867 dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
1869 ret = dw_pcie_ep_init_complete(ep);
1871 dev_err(dev, "Failed to complete initialization: %d\n", ret);
1872 goto fail_init_complete;
1875 dw_pcie_ep_init_notify(ep);
1878 val = appl_readl(pcie, APPL_CTRL);
1879 val |= APPL_CTRL_LTSSM_EN;
1880 appl_writel(pcie, val, APPL_CTRL);
1882 pcie->ep_state = EP_STATE_ENABLED;
1883 dev_dbg(dev, "Initialization of endpoint is completed\n");
1888 reset_control_assert(pcie->core_rst);
1889 tegra_pcie_disable_phy(pcie);
1891 reset_control_assert(pcie->core_apb_rst);
1893 clk_disable_unprepare(pcie->core_clk);
1894 fail_core_clk_enable:
1895 tegra_pcie_bpmp_set_pll_state(pcie, false);
1897 pm_runtime_put_sync(dev);
1900 static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
1902 struct tegra_pcie_dw *pcie = arg;
1904 if (gpiod_get_value(pcie->pex_rst_gpiod))
1905 pex_ep_event_pex_rst_assert(pcie);
1907 pex_ep_event_pex_rst_deassert(pcie);
1912 static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
1914 /* Tegra194 supports only INTA */
1918 appl_writel(pcie, 1, APPL_LEGACY_INTX);
1919 usleep_range(1000, 2000);
1920 appl_writel(pcie, 0, APPL_LEGACY_INTX);
1924 static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
1926 if (unlikely(irq > 31))
1929 appl_writel(pcie, (1 << irq), APPL_MSI_CTRL_1);
1934 static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
1936 struct dw_pcie_ep *ep = &pcie->pci.ep;
1938 writel(irq, ep->msi_mem);
1943 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1944 enum pci_epc_irq_type type,
1947 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1948 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1951 case PCI_EPC_IRQ_LEGACY:
1952 return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
1954 case PCI_EPC_IRQ_MSI:
1955 return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
1957 case PCI_EPC_IRQ_MSIX:
1958 return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
1961 dev_err(pci->dev, "Unknown IRQ type\n");
1968 static const struct pci_epc_features tegra_pcie_epc_features = {
1969 .linkup_notifier = true,
1970 .core_init_notifier = true,
1971 .msi_capable = false,
1972 .msix_capable = false,
1973 .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
1974 .bar_fixed_64bit = 1 << BAR_0,
1975 .bar_fixed_size[0] = SZ_1M,
1978 static const struct pci_epc_features*
1979 tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
1981 return &tegra_pcie_epc_features;
1984 static const struct dw_pcie_ep_ops pcie_ep_ops = {
1985 .raise_irq = tegra_pcie_ep_raise_irq,
1986 .get_features = tegra_pcie_ep_get_features,
1989 static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
1990 struct platform_device *pdev)
1992 struct dw_pcie *pci = &pcie->pci;
1993 struct device *dev = pcie->dev;
1994 struct dw_pcie_ep *ep;
1999 ep->ops = &pcie_ep_ops;
2001 ep->page_size = SZ_64K;
2003 ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
2005 dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
2010 ret = gpiod_to_irq(pcie->pex_rst_gpiod);
2012 dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
2015 pcie->pex_rst_irq = (unsigned int)ret;
2017 name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
2020 dev_err(dev, "Failed to create PERST IRQ string\n");
2024 irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
2026 pcie->ep_state = EP_STATE_DISABLED;
2028 ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
2029 tegra_pcie_ep_pex_rst_irq,
2030 IRQF_TRIGGER_RISING |
2031 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2032 name, (void *)pcie);
2034 dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
2038 name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_ep_work",
2041 dev_err(dev, "Failed to create PCIe EP work thread string\n");
2045 pm_runtime_enable(dev);
2047 ret = dw_pcie_ep_init(ep);
2049 dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
2057 static int tegra_pcie_dw_probe(struct platform_device *pdev)
2059 const struct tegra_pcie_dw_of_data *data;
2060 struct device *dev = &pdev->dev;
2061 struct resource *atu_dma_res;
2062 struct tegra_pcie_dw *pcie;
2063 struct pcie_port *pp;
2064 struct dw_pcie *pci;
2070 data = of_device_get_match_data(dev);
2072 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
2077 pci->dev = &pdev->dev;
2078 pci->ops = &tegra_dw_pcie_ops;
2079 pci->n_fts[0] = N_FTS_VAL;
2080 pci->n_fts[1] = FTS_VAL;
2081 pci->version = 0x490A;
2084 pp->num_vectors = MAX_MSI_IRQS;
2085 pcie->dev = &pdev->dev;
2086 pcie->mode = (enum dw_pcie_device_mode)data->mode;
2088 ret = tegra_pcie_dw_parse_dt(pcie);
2090 const char *level = KERN_ERR;
2092 if (ret == -EPROBE_DEFER)
2095 dev_printk(level, dev,
2096 dev_fmt("Failed to parse device tree: %d\n"),
2101 ret = tegra_pcie_get_slot_regulators(pcie);
2103 const char *level = KERN_ERR;
2105 if (ret == -EPROBE_DEFER)
2108 dev_printk(level, dev,
2109 dev_fmt("Failed to get slot regulators: %d\n"),
2114 if (pcie->pex_refclk_sel_gpiod)
2115 gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
2117 pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
2118 if (IS_ERR(pcie->pex_ctl_supply)) {
2119 ret = PTR_ERR(pcie->pex_ctl_supply);
2120 if (ret != -EPROBE_DEFER)
2121 dev_err(dev, "Failed to get regulator: %ld\n",
2122 PTR_ERR(pcie->pex_ctl_supply));
2126 pcie->core_clk = devm_clk_get(dev, "core");
2127 if (IS_ERR(pcie->core_clk)) {
2128 dev_err(dev, "Failed to get core clock: %ld\n",
2129 PTR_ERR(pcie->core_clk));
2130 return PTR_ERR(pcie->core_clk);
2133 pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2135 if (!pcie->appl_res) {
2136 dev_err(dev, "Failed to find \"appl\" region\n");
2140 pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
2141 if (IS_ERR(pcie->appl_base))
2142 return PTR_ERR(pcie->appl_base);
2144 pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
2145 if (IS_ERR(pcie->core_apb_rst)) {
2146 dev_err(dev, "Failed to get APB reset: %ld\n",
2147 PTR_ERR(pcie->core_apb_rst));
2148 return PTR_ERR(pcie->core_apb_rst);
2151 phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
2155 for (i = 0; i < pcie->phy_count; i++) {
2156 name = kasprintf(GFP_KERNEL, "p2u-%u", i);
2158 dev_err(dev, "Failed to create P2U string\n");
2161 phys[i] = devm_phy_get(dev, name);
2163 if (IS_ERR(phys[i])) {
2164 ret = PTR_ERR(phys[i]);
2165 if (ret != -EPROBE_DEFER)
2166 dev_err(dev, "Failed to get PHY: %d\n", ret);
2173 atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2176 dev_err(dev, "Failed to find \"atu_dma\" region\n");
2179 pcie->atu_dma_res = atu_dma_res;
2181 pci->atu_size = resource_size(atu_dma_res);
2182 pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
2183 if (IS_ERR(pci->atu_base))
2184 return PTR_ERR(pci->atu_base);
2186 pcie->core_rst = devm_reset_control_get(dev, "core");
2187 if (IS_ERR(pcie->core_rst)) {
2188 dev_err(dev, "Failed to get core reset: %ld\n",
2189 PTR_ERR(pcie->core_rst));
2190 return PTR_ERR(pcie->core_rst);
2193 pp->irq = platform_get_irq_byname(pdev, "intr");
2197 pcie->bpmp = tegra_bpmp_get(dev);
2198 if (IS_ERR(pcie->bpmp))
2199 return PTR_ERR(pcie->bpmp);
2201 platform_set_drvdata(pdev, pcie);
2203 switch (pcie->mode) {
2204 case DW_PCIE_RC_TYPE:
2205 ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
2206 IRQF_SHARED, "tegra-pcie-intr", pcie);
2208 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2213 ret = tegra_pcie_config_rp(pcie);
2214 if (ret && ret != -ENOMEDIUM)
2220 case DW_PCIE_EP_TYPE:
2221 ret = devm_request_threaded_irq(dev, pp->irq,
2222 tegra_pcie_ep_hard_irq,
2223 tegra_pcie_ep_irq_thread,
2224 IRQF_SHARED | IRQF_ONESHOT,
2225 "tegra-pcie-ep-intr", pcie);
2227 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2232 ret = tegra_pcie_config_ep(pcie, pdev);
2238 dev_err(dev, "Invalid PCIe device type %d\n", pcie->mode);
2242 tegra_bpmp_put(pcie->bpmp);
2246 static int tegra_pcie_dw_remove(struct platform_device *pdev)
2248 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
2250 if (!pcie->link_state)
2253 debugfs_remove_recursive(pcie->debugfs);
2254 tegra_pcie_deinit_controller(pcie);
2255 pm_runtime_put_sync(pcie->dev);
2256 pm_runtime_disable(pcie->dev);
2257 tegra_bpmp_put(pcie->bpmp);
2258 if (pcie->pex_refclk_sel_gpiod)
2259 gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
2264 static int tegra_pcie_dw_suspend_late(struct device *dev)
2266 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2269 if (!pcie->link_state)
2272 /* Enable HW_HOT_RST mode */
2273 val = appl_readl(pcie, APPL_CTRL);
2274 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2275 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2276 val |= APPL_CTRL_HW_HOT_RST_EN;
2277 appl_writel(pcie, val, APPL_CTRL);
2282 static int tegra_pcie_dw_suspend_noirq(struct device *dev)
2284 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2286 if (!pcie->link_state)
2289 /* Save MSI interrupt vector */
2290 pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
2291 PORT_LOGIC_MSI_CTRL_INT_0_EN);
2292 tegra_pcie_downstream_dev_to_D0(pcie);
2293 tegra_pcie_dw_pme_turnoff(pcie);
2294 tegra_pcie_unconfig_controller(pcie);
2299 static int tegra_pcie_dw_resume_noirq(struct device *dev)
2301 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2304 if (!pcie->link_state)
2307 ret = tegra_pcie_config_controller(pcie, true);
2311 ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
2313 dev_err(dev, "Failed to init host: %d\n", ret);
2314 goto fail_host_init;
2317 ret = tegra_pcie_dw_start_link(&pcie->pci);
2319 goto fail_host_init;
2321 /* Restore MSI interrupt vector */
2322 dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
2323 pcie->msi_ctrl_int);
2328 tegra_pcie_unconfig_controller(pcie);
2332 static int tegra_pcie_dw_resume_early(struct device *dev)
2334 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2337 if (!pcie->link_state)
2340 /* Disable HW_HOT_RST mode */
2341 val = appl_readl(pcie, APPL_CTRL);
2342 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2343 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2344 val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
2345 APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
2346 val &= ~APPL_CTRL_HW_HOT_RST_EN;
2347 appl_writel(pcie, val, APPL_CTRL);
2352 static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
2354 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
2356 if (!pcie->link_state)
2359 debugfs_remove_recursive(pcie->debugfs);
2360 tegra_pcie_downstream_dev_to_D0(pcie);
2362 disable_irq(pcie->pci.pp.irq);
2363 if (IS_ENABLED(CONFIG_PCI_MSI))
2364 disable_irq(pcie->pci.pp.msi_irq);
2366 tegra_pcie_dw_pme_turnoff(pcie);
2367 tegra_pcie_unconfig_controller(pcie);
2370 static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
2371 .mode = DW_PCIE_RC_TYPE,
2374 static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
2375 .mode = DW_PCIE_EP_TYPE,
2378 static const struct of_device_id tegra_pcie_dw_of_match[] = {
2380 .compatible = "nvidia,tegra194-pcie",
2381 .data = &tegra_pcie_dw_rc_of_data,
2384 .compatible = "nvidia,tegra194-pcie-ep",
2385 .data = &tegra_pcie_dw_ep_of_data,
2390 static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
2391 .suspend_late = tegra_pcie_dw_suspend_late,
2392 .suspend_noirq = tegra_pcie_dw_suspend_noirq,
2393 .resume_noirq = tegra_pcie_dw_resume_noirq,
2394 .resume_early = tegra_pcie_dw_resume_early,
2397 static struct platform_driver tegra_pcie_dw_driver = {
2398 .probe = tegra_pcie_dw_probe,
2399 .remove = tegra_pcie_dw_remove,
2400 .shutdown = tegra_pcie_dw_shutdown,
2402 .name = "tegra194-pcie",
2403 .pm = &tegra_pcie_dw_pm_ops,
2404 .of_match_table = tegra_pcie_dw_of_match,
2407 module_platform_driver(tegra_pcie_dw_driver);
2409 MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
2411 MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
2412 MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
2413 MODULE_LICENSE("GPL v2");
2415 #endif /* CONFIG_PCIE_TEGRA194 */