1 // SPDX-License-Identifier: GPL-2.0
/*
 * Intel Combo-PHY driver
 *
 * Copyright (C) 2019-2020 Intel Corporation.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <dt-bindings/phy/phy.h>
/* PCIe app register: general control, pad clock enable bit */
#define PCIE_PHY_GEN_CTRL 0x00
#define PCIE_PHY_CLK_PAD BIT(17)

/* Chiptop (syscfg) register disabling the PCIe refclk pads */
#define PAD_DIS_CFG 0x174

/* XPCS CR registers: RX adaptation request / acknowledge */
#define PCS_XF_ATE_OVRD_IN_2 0x3008
#define ADAPT_REQ_MSK GENMASK(5, 4)

#define PCS_XF_RX_ADAPT_ACK 0x3010
#define RX_ADAPT_ACK_BIT BIT(0)

/* CR address: per-lane 0x100 stride, word-addressed (<<2 to bytes) */
#define CR_ADDR(addr, lane) (((addr) + (lane) * 0x100) << 2)
/* HSIO (hsiocfg) registers: one 0x200 block per ComboPhy bus id */
#define REG_COMBO_MODE(x) ((x) * 0x200)
#define REG_CLK_DISABLE(x) ((x) * 0x200 + 0x124)

/* Shorthand for "(combo-phy instance):(lane)" used in log messages */
#define COMBO_PHY_ID(x) ((x)->parent->id)
#define PHY_ID(x) ((x)->id)
#define CLK_100MHZ 100000000
#define CLK_156_25MHZ 156250000

/*
 * Core clock rate per PHY mode; indexed by enum intel_phy_mode
 * (PCIe -> 100 MHz, XPCS -> 156.25 MHz, SATA -> 100 MHz).
 */
static const unsigned long intel_iphy_clk_rates[] = {
	CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
};

/* Lane (internal PHY) indices within one ComboPhy instance */
enum {
	PHY_0,
	PHY_1,
	PHY_MAX_NUM
};
/*
 * Clock Register bit fields to enable clocks
 * for ComboPhy according to the mode.
 */
enum intel_phy_mode {
	PHY_PCIE_MODE = 0,
	PHY_XPCS_MODE,
	PHY_SATA_MODE,
};

/* ComboPhy mode Register values */
/* NOTE(review): values written to REG_COMBO_MODE() — confirm against SoC datasheet */
enum intel_combo_mode {
	PCIE0_PCIE1_MODE = 0,
	PCIE_DL_MODE,
	RXAUI_MODE,
	XPCS0_XPCS1_MODE,
	SATA0_SATA1_MODE,
};

/* Lane aggregation: two independent single lanes vs. one dual-lane PHY */
enum aggregated_mode {
	PHY_SL_MODE,
	PHY_DL_MODE,
};
77 struct intel_combo_phy;
79 struct intel_cbphy_iphy {
81 struct intel_combo_phy *parent;
82 struct reset_control *app_rst;
86 struct intel_combo_phy {
89 unsigned long clk_rate;
90 void __iomem *app_base;
91 void __iomem *cr_base;
92 struct regmap *syscfg;
93 struct regmap *hsiocfg;
96 struct reset_control *phy_rst;
97 struct reset_control *core_rst;
98 struct intel_cbphy_iphy iphy[PHY_MAX_NUM];
99 enum intel_phy_mode phy_mode;
100 enum aggregated_mode aggr_mode;
105 static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
107 struct intel_combo_phy *cbphy = iphy->parent;
108 u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
111 /* Register: 0 is enable, 1 is disable */
112 val = set ? 0 : mask;
114 return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
118 static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
120 struct intel_combo_phy *cbphy = iphy->parent;
121 u32 mask = BIT(cbphy->id * 2 + iphy->id);
124 /* Register: 0 is enable, 1 is disable */
125 val = set ? 0 : mask;
127 return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
130 static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
135 reg_val = readl(base + reg);
138 writel(reg_val, base + reg);
141 static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
142 int (*phy_cfg)(struct intel_cbphy_iphy *))
144 struct intel_combo_phy *cbphy = iphy->parent;
151 if (cbphy->aggr_mode != PHY_DL_MODE)
154 return phy_cfg(&cbphy->iphy[PHY_1]);
157 static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
159 struct intel_combo_phy *cbphy = iphy->parent;
162 ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
164 dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
171 combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
172 PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));
174 /* Delay for stable clock PLL */
175 usleep_range(50, 100);
180 static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
182 struct intel_combo_phy *cbphy = iphy->parent;
185 ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
187 dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
194 combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
195 PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));
200 static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
202 enum intel_combo_mode cb_mode = PHY_PCIE_MODE;
203 enum aggregated_mode aggr = cbphy->aggr_mode;
204 struct device *dev = cbphy->dev;
205 enum intel_phy_mode mode;
208 mode = cbphy->phy_mode;
212 cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
216 cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
220 if (aggr == PHY_DL_MODE) {
221 dev_err(dev, "Mode:%u not support dual lane!\n", mode);
225 cb_mode = SATA0_SATA1_MODE;
229 ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
231 dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);
236 static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
238 reset_control_assert(cbphy->core_rst);
239 reset_control_assert(cbphy->phy_rst);
242 static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
244 reset_control_deassert(cbphy->core_rst);
245 reset_control_deassert(cbphy->phy_rst);
246 /* Delay to ensure reset process is done */
247 usleep_range(10, 20);
250 static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
252 struct intel_combo_phy *cbphy = iphy->parent;
255 if (!cbphy->init_cnt) {
256 ret = clk_prepare_enable(cbphy->core_clk);
258 dev_err(cbphy->dev, "Clock enable failed!\n");
262 ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
264 dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
269 intel_cbphy_rst_assert(cbphy);
270 intel_cbphy_rst_deassert(cbphy);
271 ret = intel_cbphy_set_mode(cbphy);
276 ret = intel_cbphy_iphy_enable(iphy, true);
278 dev_err(cbphy->dev, "Failed enabling PHY core\n");
282 ret = reset_control_deassert(iphy->app_rst);
284 dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
285 COMBO_PHY_ID(iphy), PHY_ID(iphy));
289 /* Delay to ensure reset process is done */
295 clk_disable_unprepare(cbphy->core_clk);
300 static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
302 struct intel_combo_phy *cbphy = iphy->parent;
305 ret = reset_control_assert(iphy->app_rst);
307 dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
308 COMBO_PHY_ID(iphy), PHY_ID(iphy));
312 ret = intel_cbphy_iphy_enable(iphy, false);
314 dev_err(cbphy->dev, "Failed disabling PHY core\n");
321 clk_disable_unprepare(cbphy->core_clk);
322 intel_cbphy_rst_assert(cbphy);
327 static int intel_cbphy_init(struct phy *phy)
329 struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
330 struct intel_combo_phy *cbphy = iphy->parent;
333 mutex_lock(&cbphy->lock);
334 ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
338 if (cbphy->phy_mode == PHY_PCIE_MODE) {
339 ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
347 mutex_unlock(&cbphy->lock);
352 static int intel_cbphy_exit(struct phy *phy)
354 struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
355 struct intel_combo_phy *cbphy = iphy->parent;
358 mutex_lock(&cbphy->lock);
360 if (cbphy->phy_mode == PHY_PCIE_MODE) {
361 ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
366 ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);
369 mutex_unlock(&cbphy->lock);
374 static int intel_cbphy_calibrate(struct phy *phy)
376 struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
377 struct intel_combo_phy *cbphy = iphy->parent;
378 void __iomem *cr_base = cbphy->cr_base;
381 if (cbphy->phy_mode != PHY_XPCS_MODE)
386 /* trigger auto RX adaptation */
387 combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
388 ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
389 /* Wait RX adaptation to finish */
390 ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
391 val, val & RX_ADAPT_ACK_BIT, 10, 5000);
393 dev_err(cbphy->dev, "RX Adaptation failed!\n");
395 dev_dbg(cbphy->dev, "RX Adaptation success!\n");
397 /* Stop RX adaptation */
398 combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
399 ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));
404 static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
406 struct device *dev = cbphy->dev;
407 struct platform_device *pdev = to_platform_device(dev);
408 struct fwnode_handle *fwnode = dev_fwnode(dev);
409 struct fwnode_reference_args ref;
413 cbphy->core_clk = devm_clk_get(dev, NULL);
414 if (IS_ERR(cbphy->core_clk)) {
415 ret = PTR_ERR(cbphy->core_clk);
416 if (ret != -EPROBE_DEFER)
417 dev_err(dev, "Get clk failed:%d!\n", ret);
421 cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
422 if (IS_ERR(cbphy->core_rst)) {
423 ret = PTR_ERR(cbphy->core_rst);
424 if (ret != -EPROBE_DEFER)
425 dev_err(dev, "Get core reset control err: %d!\n", ret);
429 cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
430 if (IS_ERR(cbphy->phy_rst)) {
431 ret = PTR_ERR(cbphy->phy_rst);
432 if (ret != -EPROBE_DEFER)
433 dev_err(dev, "Get PHY reset control err: %d!\n", ret);
437 cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
438 if (IS_ERR(cbphy->iphy[0].app_rst)) {
439 ret = PTR_ERR(cbphy->iphy[0].app_rst);
440 if (ret != -EPROBE_DEFER)
441 dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
445 cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
446 if (IS_ERR(cbphy->iphy[1].app_rst)) {
447 ret = PTR_ERR(cbphy->iphy[1].app_rst);
448 if (ret != -EPROBE_DEFER)
449 dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
453 cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
454 if (IS_ERR(cbphy->app_base))
455 return PTR_ERR(cbphy->app_base);
457 cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
458 if (IS_ERR(cbphy->cr_base))
459 return PTR_ERR(cbphy->cr_base);
462 * syscfg and hsiocfg variables stores the handle of the registers set
463 * in which ComboPhy subsytem specific registers are subset. Using
464 * Register map framework to access the registers set.
466 ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
471 cbphy->id = ref.args[0];
472 cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
473 fwnode_handle_put(ref.fwnode);
475 ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
480 cbphy->bid = ref.args[0];
481 cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
482 fwnode_handle_put(ref.fwnode);
484 ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
490 cbphy->phy_mode = PHY_PCIE_MODE;
494 cbphy->phy_mode = PHY_SATA_MODE;
498 cbphy->phy_mode = PHY_XPCS_MODE;
502 dev_err(dev, "Invalid PHY mode: %u\n", val);
506 cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];
508 if (fwnode_property_present(fwnode, "intel,aggregation"))
509 cbphy->aggr_mode = PHY_DL_MODE;
511 cbphy->aggr_mode = PHY_SL_MODE;
516 static const struct phy_ops intel_cbphy_ops = {
517 .init = intel_cbphy_init,
518 .exit = intel_cbphy_exit,
519 .calibrate = intel_cbphy_calibrate,
520 .owner = THIS_MODULE,
523 static struct phy *intel_cbphy_xlate(struct device *dev,
524 struct of_phandle_args *args)
526 struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
529 if (args->args_count < 1) {
530 dev_err(dev, "Invalid number of arguments\n");
531 return ERR_PTR(-EINVAL);
534 iphy_id = args->args[0];
535 if (iphy_id >= PHY_MAX_NUM) {
536 dev_err(dev, "Invalid phy instance %d\n", iphy_id);
537 return ERR_PTR(-EINVAL);
540 if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
541 dev_err(dev, "Invalid. ComboPhy is in Dual lane mode %d\n", iphy_id);
542 return ERR_PTR(-EINVAL);
545 return cbphy->iphy[iphy_id].phy;
548 static int intel_cbphy_create(struct intel_combo_phy *cbphy)
550 struct phy_provider *phy_provider;
551 struct device *dev = cbphy->dev;
552 struct intel_cbphy_iphy *iphy;
555 for (i = 0; i < PHY_MAX_NUM; i++) {
556 iphy = &cbphy->iphy[i];
557 iphy->parent = cbphy;
560 /* In dual lane mode skip phy creation for the second phy */
561 if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
564 iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
565 if (IS_ERR(iphy->phy)) {
566 dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
567 COMBO_PHY_ID(iphy), PHY_ID(iphy));
569 return PTR_ERR(iphy->phy);
572 phy_set_drvdata(iphy->phy, iphy);
575 dev_set_drvdata(dev, cbphy);
576 phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
577 if (IS_ERR(phy_provider))
578 dev_err(dev, "Register PHY provider failed!\n");
580 return PTR_ERR_OR_ZERO(phy_provider);
583 static int intel_cbphy_probe(struct platform_device *pdev)
585 struct device *dev = &pdev->dev;
586 struct intel_combo_phy *cbphy;
589 cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
595 mutex_init(&cbphy->lock);
596 ret = intel_cbphy_fwnode_parse(cbphy);
600 platform_set_drvdata(pdev, cbphy);
602 return intel_cbphy_create(cbphy);
605 static int intel_cbphy_remove(struct platform_device *pdev)
607 struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);
609 intel_cbphy_rst_assert(cbphy);
610 clk_disable_unprepare(cbphy->core_clk);
614 static const struct of_device_id of_intel_cbphy_match[] = {
615 { .compatible = "intel,combo-phy" },
616 { .compatible = "intel,combophy-lgm" },
620 static struct platform_driver intel_cbphy_driver = {
621 .probe = intel_cbphy_probe,
622 .remove = intel_cbphy_remove,
624 .name = "intel-combo-phy",
625 .of_match_table = of_intel_cbphy_match,
629 module_platform_driver(intel_cbphy_driver);
631 MODULE_DESCRIPTION("Intel Combo-phy driver");
632 MODULE_LICENSE("GPL v2");