// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */
11 #include <linux/delay.h>
13 #include <linux/types.h>
15 #include "../../pci.h"
16 #include "pcie-designware.h"
/*
 * These interfaces resemble the pci_find_*capability() interfaces, but these
 * are for configuring host controllers, which are bridges *to* PCI devices but
 * are not PCI devices themselves.
 */
23 static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
26 u8 cap_id, next_cap_ptr;
32 reg = dw_pcie_readw_dbi(pci, cap_ptr);
33 cap_id = (reg & 0x00ff);
35 if (cap_id > PCI_CAP_ID_MAX)
41 next_cap_ptr = (reg & 0xff00) >> 8;
42 return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
45 u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
50 reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
51 next_cap_ptr = (reg & 0x00ff);
53 return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
55 EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
57 static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
62 int pos = PCI_CFG_SPACE_SIZE;
64 /* minimum 8 bytes per capability */
65 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
70 header = dw_pcie_readl_dbi(pci, pos);
72 * If we have no capabilities, this is indicated by cap ID,
73 * cap version and next pointer all being 0.
79 if (PCI_EXT_CAP_ID(header) == cap && pos != start)
82 pos = PCI_EXT_CAP_NEXT(header);
83 if (pos < PCI_CFG_SPACE_SIZE)
86 header = dw_pcie_readl_dbi(pci, pos);
92 u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
94 return dw_pcie_find_next_ext_capability(pci, 0, cap);
96 EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
98 int dw_pcie_read(void __iomem *addr, int size, u32 *val)
100 if (!IS_ALIGNED((uintptr_t)addr, size)) {
102 return PCIBIOS_BAD_REGISTER_NUMBER;
107 } else if (size == 2) {
109 } else if (size == 1) {
113 return PCIBIOS_BAD_REGISTER_NUMBER;
116 return PCIBIOS_SUCCESSFUL;
118 EXPORT_SYMBOL_GPL(dw_pcie_read);
120 int dw_pcie_write(void __iomem *addr, int size, u32 val)
122 if (!IS_ALIGNED((uintptr_t)addr, size))
123 return PCIBIOS_BAD_REGISTER_NUMBER;
132 return PCIBIOS_BAD_REGISTER_NUMBER;
134 return PCIBIOS_SUCCESSFUL;
136 EXPORT_SYMBOL_GPL(dw_pcie_write);
138 u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
143 if (pci->ops->read_dbi)
144 return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
146 ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
148 dev_err(pci->dev, "Read DBI address failed\n");
152 EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
154 void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
158 if (pci->ops->write_dbi) {
159 pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
163 ret = dw_pcie_write(pci->dbi_base + reg, size, val);
165 dev_err(pci->dev, "Write DBI address failed\n");
167 EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
169 void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
173 if (pci->ops->write_dbi2) {
174 pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
178 ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
180 dev_err(pci->dev, "write DBI address failed\n");
183 static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
188 if (pci->ops->read_dbi)
189 return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);
191 ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
193 dev_err(pci->dev, "Read ATU address failed\n");
198 static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
202 if (pci->ops->write_dbi) {
203 pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
207 ret = dw_pcie_write(pci->atu_base + reg, 4, val);
209 dev_err(pci->dev, "Write ATU address failed\n");
212 static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
214 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
216 return dw_pcie_readl_atu(pci, offset + reg);
219 static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
222 u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
224 dw_pcie_writel_atu(pci, offset + reg, val);
227 static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
228 int type, u64 cpu_addr,
229 u64 pci_addr, u32 size)
232 u64 limit_addr = cpu_addr + size - 1;
234 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
235 lower_32_bits(cpu_addr));
236 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
237 upper_32_bits(cpu_addr));
238 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
239 lower_32_bits(limit_addr));
240 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
241 upper_32_bits(limit_addr));
242 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
243 lower_32_bits(pci_addr));
244 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
245 upper_32_bits(pci_addr));
246 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
248 dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
252 * Make sure ATU enable takes effect before any subsequent config
255 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
256 val = dw_pcie_readl_ob_unroll(pci, index,
257 PCIE_ATU_UNR_REGION_CTRL2);
258 if (val & PCIE_ATU_ENABLE)
261 mdelay(LINK_WAIT_IATU);
263 dev_err(pci->dev, "Outbound iATU is not being enabled\n");
266 void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
267 u64 cpu_addr, u64 pci_addr, u32 size)
271 if (pci->ops->cpu_addr_fixup)
272 cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
274 if (pci->iatu_unroll_enabled) {
275 dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
280 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
281 PCIE_ATU_REGION_OUTBOUND | index);
282 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
283 lower_32_bits(cpu_addr));
284 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
285 upper_32_bits(cpu_addr));
286 dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
287 lower_32_bits(cpu_addr + size - 1));
288 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
289 lower_32_bits(pci_addr));
290 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
291 upper_32_bits(pci_addr));
292 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
293 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
296 * Make sure ATU enable takes effect before any subsequent config
299 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
300 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
301 if (val & PCIE_ATU_ENABLE)
304 mdelay(LINK_WAIT_IATU);
306 dev_err(pci->dev, "Outbound iATU is not being enabled\n");
309 static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
311 u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
313 return dw_pcie_readl_atu(pci, offset + reg);
316 static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
319 u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
321 dw_pcie_writel_atu(pci, offset + reg, val);
324 static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
325 int bar, u64 cpu_addr,
326 enum dw_pcie_as_type as_type)
331 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
332 lower_32_bits(cpu_addr));
333 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
334 upper_32_bits(cpu_addr));
338 type = PCIE_ATU_TYPE_MEM;
341 type = PCIE_ATU_TYPE_IO;
347 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
348 dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
350 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
353 * Make sure ATU enable takes effect before any subsequent config
356 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
357 val = dw_pcie_readl_ib_unroll(pci, index,
358 PCIE_ATU_UNR_REGION_CTRL2);
359 if (val & PCIE_ATU_ENABLE)
362 mdelay(LINK_WAIT_IATU);
364 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
369 int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
370 u64 cpu_addr, enum dw_pcie_as_type as_type)
375 if (pci->iatu_unroll_enabled)
376 return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
379 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
381 dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
382 dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
386 type = PCIE_ATU_TYPE_MEM;
389 type = PCIE_ATU_TYPE_IO;
395 dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
396 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
397 | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
400 * Make sure ATU enable takes effect before any subsequent config
403 for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
404 val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
405 if (val & PCIE_ATU_ENABLE)
408 mdelay(LINK_WAIT_IATU);
410 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
415 void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
416 enum dw_pcie_region_type type)
421 case DW_PCIE_REGION_INBOUND:
422 region = PCIE_ATU_REGION_INBOUND;
424 case DW_PCIE_REGION_OUTBOUND:
425 region = PCIE_ATU_REGION_OUTBOUND;
431 dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
432 dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE);
435 int dw_pcie_wait_for_link(struct dw_pcie *pci)
439 /* Check if the link is up or not */
440 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
441 if (dw_pcie_link_up(pci)) {
442 dev_info(pci->dev, "Link up\n");
445 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
448 dev_info(pci->dev, "Phy link never came up\n");
452 EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
454 int dw_pcie_link_up(struct dw_pcie *pci)
458 if (pci->ops->link_up)
459 return pci->ops->link_up(pci);
461 val = readl(pci->dbi_base + PCIE_PORT_DEBUG1);
462 return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
463 (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
466 void dw_pcie_upconfig_setup(struct dw_pcie *pci)
470 val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
471 val |= PORT_MLTI_UPCFG_SUPPORT;
472 dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
474 EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
476 static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
478 u32 cap, ctrl2, link_speed;
479 u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
481 cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
482 ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
483 ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
485 switch (pcie_link_speed[link_gen]) {
486 case PCIE_SPEED_2_5GT:
487 link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
489 case PCIE_SPEED_5_0GT:
490 link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
492 case PCIE_SPEED_8_0GT:
493 link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
495 case PCIE_SPEED_16_0GT:
496 link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
499 /* Use hardware capability */
500 link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
501 ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
505 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);
507 cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
508 dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
512 void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts)
516 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
517 val &= ~PORT_LOGIC_N_FTS_MASK;
518 val |= n_fts & PORT_LOGIC_N_FTS_MASK;
519 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
521 EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);
523 static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
527 val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
528 if (val == 0xffffffff)
534 void dw_pcie_setup(struct dw_pcie *pci)
537 struct device *dev = pci->dev;
538 struct device_node *np = dev->of_node;
540 if (pci->version >= 0x480A || (!pci->version &&
541 dw_pcie_iatu_unroll_enabled(pci))) {
542 pci->iatu_unroll_enabled = true;
544 pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
546 dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?
547 "enabled" : "disabled");
549 if (pci->link_gen > 0)
550 dw_pcie_link_set_max_speed(pci, pci->link_gen);
552 val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
553 val &= ~PORT_LINK_FAST_LINK_MODE;
554 dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
556 of_property_read_u32(np, "num-lanes", &pci->num_lanes);
557 if (!pci->num_lanes) {
558 dev_dbg(pci->dev, "Using h/w default number of lanes\n");
562 /* Set the number of lanes */
563 val &= ~PORT_LINK_FAST_LINK_MODE;
564 val &= ~PORT_LINK_MODE_MASK;
565 switch (pci->num_lanes) {
567 val |= PORT_LINK_MODE_1_LANES;
570 val |= PORT_LINK_MODE_2_LANES;
573 val |= PORT_LINK_MODE_4_LANES;
576 val |= PORT_LINK_MODE_8_LANES;
579 dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
582 dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
584 /* Set link width speed control register */
585 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
586 val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
587 switch (pci->num_lanes) {
589 val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
592 val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
595 val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
598 val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
601 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
603 if (of_property_read_bool(np, "snps,enable-cdm-check")) {
604 val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
605 val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
606 PCIE_PL_CHK_REG_CHK_REG_START;
607 dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);