c2a35be4cdfbd6c097d54d41bc38587bd653abd4
[linux-2.6-microblaze.git] / drivers / phy / intel / phy-intel-combo.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Combo-PHY driver
4  *
5  * Copyright (C) 2019-2020 Intel Corporation.
6  */
7
8 #include <linux/bitfield.h>
9 #include <linux/clk.h>
10 #include <linux/iopoll.h>
11 #include <linux/mfd/syscon.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/of.h>
15 #include <linux/phy/phy.h>
16 #include <linux/platform_device.h>
17 #include <linux/regmap.h>
18 #include <linux/reset.h>
19
20 #include <dt-bindings/phy/phy.h>
21
/* PCIe application register: PHY general control */
#define PCIE_PHY_GEN_CTRL       0x00
/* 1 = PHY reference clock is NOT taken from the pad */
#define PCIE_PHY_CLK_PAD        BIT(17)

/* Chiptop (syscfg) register: per-lane pad disable bits */
#define PAD_DIS_CFG             0x174

/* PHY CR space: ATE override to trigger RX adaptation */
#define PCS_XF_ATE_OVRD_IN_2    0x3008
#define ADAPT_REQ_MSK           GENMASK(5, 4)

/* PHY CR space: RX adaptation completion acknowledge */
#define PCS_XF_RX_ADAPT_ACK     0x3010
#define RX_ADAPT_ACK_BIT        BIT(0)

/* CR registers are word-addressed and repeated per lane at 0x100 stride */
#define CR_ADDR(addr, lane)     (((addr) + (lane) * 0x100) << 2)
/* HSIO (hsiocfg) registers are repeated per ComboPhy block at 0x200 stride */
#define REG_COMBO_MODE(x)       ((x) * 0x200)
#define REG_CLK_DISABLE(x)      ((x) * 0x200 + 0x124)

#define COMBO_PHY_ID(x)         ((x)->parent->id)
#define PHY_ID(x)               ((x)->id)

#define CLK_100MHZ              100000000
#define CLK_156_25MHZ           156250000

/* Core clock rate per PHY mode; indexed by enum intel_phy_mode */
static const unsigned long intel_iphy_clk_rates[] = {
        CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
};
46
/* Lane instances inside one ComboPhy block */
enum {
        PHY_0,
        PHY_1,
        PHY_MAX_NUM
};

/*
 * Clock Register bit fields to enable clocks
 * for ComboPhy according to the mode.
 */
enum intel_phy_mode {
        PHY_PCIE_MODE = 0,
        PHY_XPCS_MODE,
        PHY_SATA_MODE,
};

/* ComboPhy mode Register values (written to REG_COMBO_MODE) */
enum intel_combo_mode {
        PCIE0_PCIE1_MODE = 0,   /* two independent PCIe lanes */
        PCIE_DL_MODE,           /* PCIe dual-lane aggregation */
        RXAUI_MODE,             /* XPCS dual-lane aggregation */
        XPCS0_XPCS1_MODE,       /* two independent XPCS lanes */
        SATA0_SATA1_MODE,       /* two independent SATA lanes */
};

/* Lane aggregation: single-lane (per-lane PHYs) vs dual-lane (one PHY) */
enum aggregated_mode {
        PHY_SL_MODE,
        PHY_DL_MODE,
};
76
struct intel_combo_phy;

/* One lane (internal PHY) of a ComboPhy block */
struct intel_cbphy_iphy {
        struct phy              *phy;      /* generic PHY handle; NULL for lane 1 in DL mode */
        struct intel_combo_phy  *parent;   /* back-pointer to the owning ComboPhy */
        struct reset_control    *app_rst;  /* per-lane application reset */
        u32                     id;        /* lane index: PHY_0 or PHY_1 */
};

/* One ComboPhy block (two lanes sharing clock, resets and mode setting) */
struct intel_combo_phy {
        struct device           *dev;
        struct clk              *core_clk;
        unsigned long           clk_rate;  /* core clock rate chosen per phy_mode */
        void __iomem            *app_base; /* "app" register region */
        void __iomem            *cr_base;  /* "core" (CR) register region */
        struct regmap           *syscfg;   /* chiptop regmap (pad control) */
        struct regmap           *hsiocfg;  /* HSIO regmap (mode/clock gating) */
        u32                     id;        /* ComboPhy instance id within syscfg */
        u32                     bid;       /* ComboPhy block id within hsiocfg */
        struct reset_control    *phy_rst;
        struct reset_control    *core_rst;
        struct intel_cbphy_iphy iphy[PHY_MAX_NUM];
        enum intel_phy_mode     phy_mode;
        enum aggregated_mode    aggr_mode;
        u32                     init_cnt;  /* number of completed phy_init() calls */
        struct mutex            lock;      /* serializes init/exit over shared state */
};
104
105 static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
106 {
107         struct intel_combo_phy *cbphy = iphy->parent;
108         u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
109         u32 val;
110
111         /* Register: 0 is enable, 1 is disable */
112         val = set ? 0 : mask;
113
114         return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
115                                   mask, val);
116 }
117
118 static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
119 {
120         struct intel_combo_phy *cbphy = iphy->parent;
121         u32 mask = BIT(cbphy->id * 2 + iphy->id);
122         u32 val;
123
124         /* Register: 0 is enable, 1 is disable */
125         val = set ? 0 : mask;
126
127         return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
128 }
129
130 static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
131                                           u32 mask, u32 val)
132 {
133         u32 reg_val;
134
135         reg_val = readl(base + reg);
136         reg_val &= ~mask;
137         reg_val |= FIELD_PREP(mask, val);
138         writel(reg_val, base + reg);
139 }
140
141 static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
142                                 int (*phy_cfg)(struct intel_cbphy_iphy *))
143 {
144         struct intel_combo_phy *cbphy = iphy->parent;
145         int ret;
146
147         ret = phy_cfg(iphy);
148         if (ret)
149                 return ret;
150
151         if (cbphy->aggr_mode != PHY_DL_MODE)
152                 return 0;
153
154         return phy_cfg(&cbphy->iphy[PHY_1]);
155 }
156
157 static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
158 {
159         struct intel_combo_phy *cbphy = iphy->parent;
160         int ret;
161
162         ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
163         if (ret) {
164                 dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
165                 return ret;
166         }
167
168         if (cbphy->init_cnt)
169                 return 0;
170
171         combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
172                                PCIE_PHY_CLK_PAD, 0);
173
174         /* Delay for stable clock PLL */
175         usleep_range(50, 100);
176
177         return 0;
178 }
179
180 static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
181 {
182         struct intel_combo_phy *cbphy = iphy->parent;
183         int ret;
184
185         ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
186         if (ret) {
187                 dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
188                 return ret;
189         }
190
191         if (cbphy->init_cnt)
192                 return 0;
193
194         combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
195                                PCIE_PHY_CLK_PAD, 1);
196
197         return 0;
198 }
199
200 static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
201 {
202         enum intel_combo_mode cb_mode = PHY_PCIE_MODE;
203         enum aggregated_mode aggr = cbphy->aggr_mode;
204         struct device *dev = cbphy->dev;
205         enum intel_phy_mode mode;
206         int ret;
207
208         mode = cbphy->phy_mode;
209
210         switch (mode) {
211         case PHY_PCIE_MODE:
212                 cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
213                 break;
214
215         case PHY_XPCS_MODE:
216                 cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
217                 break;
218
219         case PHY_SATA_MODE:
220                 if (aggr == PHY_DL_MODE) {
221                         dev_err(dev, "Mode:%u not support dual lane!\n", mode);
222                         return -EINVAL;
223                 }
224
225                 cb_mode = SATA0_SATA1_MODE;
226                 break;
227         }
228
229         ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
230         if (ret)
231                 dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);
232
233         return ret;
234 }
235
/* Put both the core and the PHY of the ComboPhy block into reset */
static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
{
        reset_control_assert(cbphy->core_rst);
        reset_control_assert(cbphy->phy_rst);
}
241
/* Release core and PHY resets, then wait for the reset to settle */
static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
{
        reset_control_deassert(cbphy->core_rst);
        reset_control_deassert(cbphy->phy_rst);
        /* Delay to ensure reset process is done */
        usleep_range(10, 20);
}
249
250 static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
251 {
252         struct intel_combo_phy *cbphy = iphy->parent;
253         int ret;
254
255         if (!cbphy->init_cnt) {
256                 ret = clk_prepare_enable(cbphy->core_clk);
257                 if (ret) {
258                         dev_err(cbphy->dev, "Clock enable failed!\n");
259                         return ret;
260                 }
261
262                 ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
263                 if (ret) {
264                         dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
265                                 cbphy->clk_rate);
266                         goto clk_err;
267                 }
268
269                 intel_cbphy_rst_assert(cbphy);
270                 intel_cbphy_rst_deassert(cbphy);
271                 ret = intel_cbphy_set_mode(cbphy);
272                 if (ret)
273                         goto clk_err;
274         }
275
276         ret = intel_cbphy_iphy_enable(iphy, true);
277         if (ret) {
278                 dev_err(cbphy->dev, "Failed enabling PHY core\n");
279                 goto clk_err;
280         }
281
282         ret = reset_control_deassert(iphy->app_rst);
283         if (ret) {
284                 dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
285                         COMBO_PHY_ID(iphy), PHY_ID(iphy));
286                 goto clk_err;
287         }
288
289         /* Delay to ensure reset process is done */
290         udelay(1);
291
292         return 0;
293
294 clk_err:
295         clk_disable_unprepare(cbphy->core_clk);
296
297         return ret;
298 }
299
300 static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
301 {
302         struct intel_combo_phy *cbphy = iphy->parent;
303         int ret;
304
305         ret = reset_control_assert(iphy->app_rst);
306         if (ret) {
307                 dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
308                         COMBO_PHY_ID(iphy), PHY_ID(iphy));
309                 return ret;
310         }
311
312         ret = intel_cbphy_iphy_enable(iphy, false);
313         if (ret) {
314                 dev_err(cbphy->dev, "Failed disabling PHY core\n");
315                 return ret;
316         }
317
318         if (cbphy->init_cnt)
319                 return 0;
320
321         clk_disable_unprepare(cbphy->core_clk);
322         intel_cbphy_rst_assert(cbphy);
323
324         return 0;
325 }
326
327 static int intel_cbphy_init(struct phy *phy)
328 {
329         struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
330         struct intel_combo_phy *cbphy = iphy->parent;
331         int ret;
332
333         mutex_lock(&cbphy->lock);
334         ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
335         if (ret)
336                 goto err;
337
338         if (cbphy->phy_mode == PHY_PCIE_MODE) {
339                 ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
340                 if (ret)
341                         goto err;
342         }
343
344         cbphy->init_cnt++;
345
346 err:
347         mutex_unlock(&cbphy->lock);
348
349         return ret;
350 }
351
352 static int intel_cbphy_exit(struct phy *phy)
353 {
354         struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
355         struct intel_combo_phy *cbphy = iphy->parent;
356         int ret;
357
358         mutex_lock(&cbphy->lock);
359         cbphy->init_cnt--;
360         if (cbphy->phy_mode == PHY_PCIE_MODE) {
361                 ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
362                 if (ret)
363                         goto err;
364         }
365
366         ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);
367
368 err:
369         mutex_unlock(&cbphy->lock);
370
371         return ret;
372 }
373
374 static int intel_cbphy_calibrate(struct phy *phy)
375 {
376         struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
377         struct intel_combo_phy *cbphy = iphy->parent;
378         void __iomem *cr_base = cbphy->cr_base;
379         int val, ret, id;
380
381         if (cbphy->phy_mode != PHY_XPCS_MODE)
382                 return 0;
383
384         id = PHY_ID(iphy);
385
386         /* trigger auto RX adaptation */
387         combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
388                                ADAPT_REQ_MSK, 3);
389         /* Wait RX adaptation to finish */
390         ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
391                                  val, val & RX_ADAPT_ACK_BIT, 10, 5000);
392         if (ret)
393                 dev_err(cbphy->dev, "RX Adaptation failed!\n");
394         else
395                 dev_dbg(cbphy->dev, "RX Adaptation success!\n");
396
397         /* Stop RX adaptation */
398         combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
399                                ADAPT_REQ_MSK, 0);
400
401         return ret;
402 }
403
404 static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
405 {
406         struct device *dev = cbphy->dev;
407         struct platform_device *pdev = to_platform_device(dev);
408         struct fwnode_handle *fwnode = dev_fwnode(dev);
409         struct fwnode_reference_args ref;
410         int ret;
411         u32 val;
412
413         cbphy->core_clk = devm_clk_get(dev, NULL);
414         if (IS_ERR(cbphy->core_clk)) {
415                 ret = PTR_ERR(cbphy->core_clk);
416                 if (ret != -EPROBE_DEFER)
417                         dev_err(dev, "Get clk failed:%d!\n", ret);
418                 return ret;
419         }
420
421         cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
422         if (IS_ERR(cbphy->core_rst)) {
423                 ret = PTR_ERR(cbphy->core_rst);
424                 if (ret != -EPROBE_DEFER)
425                         dev_err(dev, "Get core reset control err: %d!\n", ret);
426                 return ret;
427         }
428
429         cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
430         if (IS_ERR(cbphy->phy_rst)) {
431                 ret = PTR_ERR(cbphy->phy_rst);
432                 if (ret != -EPROBE_DEFER)
433                         dev_err(dev, "Get PHY reset control err: %d!\n", ret);
434                 return ret;
435         }
436
437         cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
438         if (IS_ERR(cbphy->iphy[0].app_rst)) {
439                 ret = PTR_ERR(cbphy->iphy[0].app_rst);
440                 if (ret != -EPROBE_DEFER)
441                         dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
442                 return ret;
443         }
444
445         cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
446         if (IS_ERR(cbphy->iphy[1].app_rst)) {
447                 ret = PTR_ERR(cbphy->iphy[1].app_rst);
448                 if (ret != -EPROBE_DEFER)
449                         dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
450                 return ret;
451         }
452
453         cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
454         if (IS_ERR(cbphy->app_base))
455                 return PTR_ERR(cbphy->app_base);
456
457         cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
458         if (IS_ERR(cbphy->cr_base))
459                 return PTR_ERR(cbphy->cr_base);
460
461         /*
462          * syscfg and hsiocfg variables stores the handle of the registers set
463          * in which ComboPhy subsytem specific registers are subset. Using
464          * Register map framework to access the registers set.
465          */
466         ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
467                                                  1, 0, &ref);
468         if (ret < 0)
469                 return ret;
470
471         cbphy->id = ref.args[0];
472         cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
473         fwnode_handle_put(ref.fwnode);
474
475         ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
476                                                  0, &ref);
477         if (ret < 0)
478                 return ret;
479
480         cbphy->bid = ref.args[0];
481         cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
482         fwnode_handle_put(ref.fwnode);
483
484         ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
485         if (ret)
486                 return ret;
487
488         switch (val) {
489         case PHY_TYPE_PCIE:
490                 cbphy->phy_mode = PHY_PCIE_MODE;
491                 break;
492
493         case PHY_TYPE_SATA:
494                 cbphy->phy_mode = PHY_SATA_MODE;
495                 break;
496
497         case PHY_TYPE_XPCS:
498                 cbphy->phy_mode = PHY_XPCS_MODE;
499                 break;
500
501         default:
502                 dev_err(dev, "Invalid PHY mode: %u\n", val);
503                 return -EINVAL;
504         }
505
506         cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];
507
508         if (fwnode_property_present(fwnode, "intel,aggregation"))
509                 cbphy->aggr_mode = PHY_DL_MODE;
510         else
511                 cbphy->aggr_mode = PHY_SL_MODE;
512
513         return 0;
514 }
515
/* Generic PHY framework callbacks for each lane's PHY instance */
static const struct phy_ops intel_cbphy_ops = {
        .init           = intel_cbphy_init,
        .exit           = intel_cbphy_exit,
        .calibrate      = intel_cbphy_calibrate,
        .owner          = THIS_MODULE,
};
522
523 static struct phy *intel_cbphy_xlate(struct device *dev,
524                                      struct of_phandle_args *args)
525 {
526         struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
527         u32 iphy_id;
528
529         if (args->args_count < 1) {
530                 dev_err(dev, "Invalid number of arguments\n");
531                 return ERR_PTR(-EINVAL);
532         }
533
534         iphy_id = args->args[0];
535         if (iphy_id >= PHY_MAX_NUM) {
536                 dev_err(dev, "Invalid phy instance %d\n", iphy_id);
537                 return ERR_PTR(-EINVAL);
538         }
539
540         if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
541                 dev_err(dev, "Invalid. ComboPhy is in Dual lane mode %d\n", iphy_id);
542                 return ERR_PTR(-EINVAL);
543         }
544
545         return cbphy->iphy[iphy_id].phy;
546 }
547
548 static int intel_cbphy_create(struct intel_combo_phy *cbphy)
549 {
550         struct phy_provider *phy_provider;
551         struct device *dev = cbphy->dev;
552         struct intel_cbphy_iphy *iphy;
553         int i;
554
555         for (i = 0; i < PHY_MAX_NUM; i++) {
556                 iphy = &cbphy->iphy[i];
557                 iphy->parent = cbphy;
558                 iphy->id = i;
559
560                 /* In dual lane mode skip phy creation for the second phy */
561                 if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
562                         continue;
563
564                 iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
565                 if (IS_ERR(iphy->phy)) {
566                         dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
567                                 COMBO_PHY_ID(iphy), PHY_ID(iphy));
568
569                         return PTR_ERR(iphy->phy);
570                 }
571
572                 phy_set_drvdata(iphy->phy, iphy);
573         }
574
575         dev_set_drvdata(dev, cbphy);
576         phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
577         if (IS_ERR(phy_provider))
578                 dev_err(dev, "Register PHY provider failed!\n");
579
580         return PTR_ERR_OR_ZERO(phy_provider);
581 }
582
583 static int intel_cbphy_probe(struct platform_device *pdev)
584 {
585         struct device *dev = &pdev->dev;
586         struct intel_combo_phy *cbphy;
587         int ret;
588
589         cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
590         if (!cbphy)
591                 return -ENOMEM;
592
593         cbphy->dev = dev;
594         cbphy->init_cnt = 0;
595         mutex_init(&cbphy->lock);
596         ret = intel_cbphy_fwnode_parse(cbphy);
597         if (ret)
598                 return ret;
599
600         platform_set_drvdata(pdev, cbphy);
601
602         return intel_cbphy_create(cbphy);
603 }
604
/* Platform remove: put the block into reset and release the core clock */
static int intel_cbphy_remove(struct platform_device *pdev)
{
        struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);

        intel_cbphy_rst_assert(cbphy);
        clk_disable_unprepare(cbphy->core_clk);
        return 0;
}
613
/* Device-tree compatibles handled by this driver */
static const struct of_device_id of_intel_cbphy_match[] = {
        { .compatible = "intel,combo-phy" },
        { .compatible = "intel,combophy-lgm" },
        {}
};

static struct platform_driver intel_cbphy_driver = {
        .probe = intel_cbphy_probe,
        .remove = intel_cbphy_remove,
        .driver = {
                .name = "intel-combo-phy",
                .of_match_table = of_intel_cbphy_match,
        }
};

module_platform_driver(intel_cbphy_driver);

MODULE_DESCRIPTION("Intel Combo-phy driver");
MODULE_LICENSE("GPL v2");