Merge tag 'tag-chrome-platform-for-v5.10' of git://git.kernel.org/pub/scm/linux/kerne...
[linux-2.6-microblaze.git] / drivers / phy / intel / phy-intel-lgm-combo.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Intel Combo-PHY driver
4  *
5  * Copyright (C) 2019-2020 Intel Corporation.
6  */
7
8 #include <linux/bitfield.h>
9 #include <linux/clk.h>
10 #include <linux/iopoll.h>
11 #include <linux/mfd/syscon.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/of.h>
15 #include <linux/phy/phy.h>
16 #include <linux/platform_device.h>
17 #include <linux/regmap.h>
18 #include <linux/reset.h>
19
20 #include <dt-bindings/phy/phy.h>
21
22 #define PCIE_PHY_GEN_CTRL       0x00
23 #define PCIE_PHY_CLK_PAD        BIT(17)
24
25 #define PAD_DIS_CFG             0x174
26
27 #define PCS_XF_ATE_OVRD_IN_2    0x3008
28 #define ADAPT_REQ_MSK           GENMASK(5, 4)
29
30 #define PCS_XF_RX_ADAPT_ACK     0x3010
31 #define RX_ADAPT_ACK_BIT        BIT(0)
32
33 #define CR_ADDR(addr, lane)     (((addr) + (lane) * 0x100) << 2)
34 #define REG_COMBO_MODE(x)       ((x) * 0x200)
35 #define REG_CLK_DISABLE(x)      ((x) * 0x200 + 0x124)
36
37 #define COMBO_PHY_ID(x)         ((x)->parent->id)
38 #define PHY_ID(x)               ((x)->id)
39
40 #define CLK_100MHZ              100000000
41 #define CLK_156_25MHZ           156250000
42
/*
 * Core clock rate per intel_phy_mode, indexed by mode:
 * PCIe and SATA run at 100 MHz, XPCS at 156.25 MHz.
 */
static const unsigned long intel_iphy_clk_rates[] = {
	CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
};
46
/* Internal PHY (lane) indices within one ComboPhy */
enum {
	PHY_0,
	PHY_1,
	PHY_MAX_NUM
};
52
/*
 * Operating mode of the ComboPhy; also selects the clock-register
 * bit fields used to enable the per-lane clocks (see
 * intel_cbphy_iphy_enable()) and indexes intel_iphy_clk_rates[].
 */
enum intel_phy_mode {
	PHY_PCIE_MODE = 0,
	PHY_XPCS_MODE,
	PHY_SATA_MODE,
};
62
/* ComboPhy mode Register values (written to REG_COMBO_MODE) */
enum intel_combo_mode {
	PCIE0_PCIE1_MODE = 0,	/* two independent PCIe lanes */
	PCIE_DL_MODE,		/* dual-lane (aggregated) PCIe */
	RXAUI_MODE,		/* dual-lane XPCS */
	XPCS0_XPCS1_MODE,	/* two independent XPCS lanes */
	SATA0_SATA1_MODE,	/* two independent SATA lanes */
};
71
/* Lane aggregation: two independent lanes, or both combined into one link */
enum aggregated_mode {
	PHY_SL_MODE,	/* single lane */
	PHY_DL_MODE,	/* dual lane */
};
76
77 struct intel_combo_phy;
78
/* Per-lane (internal PHY) state */
struct intel_cbphy_iphy {
	struct phy		*phy;		/* generic PHY instance; not created for lane 1 in dual-lane mode */
	struct intel_combo_phy	*parent;	/* owning ComboPhy */
	struct reset_control	*app_rst;	/* per-lane application reset ("iphy0"/"iphy1") */
	u32			id;		/* lane index: PHY_0 or PHY_1 */
};
85
/* One ComboPhy instance: two lanes sharing clock, resets and registers */
struct intel_combo_phy {
	struct device		*dev;
	struct clk		*core_clk;
	unsigned long		clk_rate;	/* core clock rate chosen for phy_mode */
	void __iomem		*app_base;	/* "app" register region */
	void __iomem		*cr_base;	/* "core" (CR) register region */
	struct regmap		*syscfg;	/* chiptop syscon from "intel,syscfg" */
	struct regmap		*hsiocfg;	/* HSIO syscon from "intel,hsio" */
	u32			id;		/* ComboPhy index ("intel,syscfg" arg) */
	u32			bid;		/* HSIO block index ("intel,hsio" arg) */
	struct reset_control	*phy_rst;
	struct reset_control	*core_rst;
	struct intel_cbphy_iphy	iphy[PHY_MAX_NUM];
	enum intel_phy_mode	phy_mode;	/* PCIe, XPCS or SATA */
	enum aggregated_mode	aggr_mode;	/* single- or dual-lane */
	u32			init_cnt;	/* number of active inits; guarded by lock */
	struct mutex		lock;		/* serializes init/exit refcounting */
};
104
105 static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
106 {
107         struct intel_combo_phy *cbphy = iphy->parent;
108         u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
109         u32 val;
110
111         /* Register: 0 is enable, 1 is disable */
112         val = set ? 0 : mask;
113
114         return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
115                                   mask, val);
116 }
117
118 static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
119 {
120         struct intel_combo_phy *cbphy = iphy->parent;
121         u32 mask = BIT(cbphy->id * 2 + iphy->id);
122         u32 val;
123
124         /* Register: 0 is enable, 1 is disable */
125         val = set ? 0 : mask;
126
127         return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
128 }
129
130 static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
131                                           u32 mask, u32 val)
132 {
133         u32 reg_val;
134
135         reg_val = readl(base + reg);
136         reg_val &= ~mask;
137         reg_val |= val;
138         writel(reg_val, base + reg);
139 }
140
141 static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
142                                 int (*phy_cfg)(struct intel_cbphy_iphy *))
143 {
144         struct intel_combo_phy *cbphy = iphy->parent;
145         int ret;
146
147         ret = phy_cfg(iphy);
148         if (ret)
149                 return ret;
150
151         if (cbphy->aggr_mode != PHY_DL_MODE)
152                 return 0;
153
154         return phy_cfg(&cbphy->iphy[PHY_1]);
155 }
156
157 static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
158 {
159         struct intel_combo_phy *cbphy = iphy->parent;
160         int ret;
161
162         ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
163         if (ret) {
164                 dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
165                 return ret;
166         }
167
168         if (cbphy->init_cnt)
169                 return 0;
170
171         combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
172                                PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));
173
174         /* Delay for stable clock PLL */
175         usleep_range(50, 100);
176
177         return 0;
178 }
179
180 static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
181 {
182         struct intel_combo_phy *cbphy = iphy->parent;
183         int ret;
184
185         ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
186         if (ret) {
187                 dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
188                 return ret;
189         }
190
191         if (cbphy->init_cnt)
192                 return 0;
193
194         combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
195                                PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));
196
197         return 0;
198 }
199
200 static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
201 {
202         enum intel_combo_mode cb_mode;
203         enum aggregated_mode aggr = cbphy->aggr_mode;
204         struct device *dev = cbphy->dev;
205         enum intel_phy_mode mode;
206         int ret;
207
208         mode = cbphy->phy_mode;
209
210         switch (mode) {
211         case PHY_PCIE_MODE:
212                 cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
213                 break;
214
215         case PHY_XPCS_MODE:
216                 cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
217                 break;
218
219         case PHY_SATA_MODE:
220                 if (aggr == PHY_DL_MODE) {
221                         dev_err(dev, "Mode:%u not support dual lane!\n", mode);
222                         return -EINVAL;
223                 }
224
225                 cb_mode = SATA0_SATA1_MODE;
226                 break;
227         default:
228                 return -EINVAL;
229         }
230
231         ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
232         if (ret)
233                 dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);
234
235         return ret;
236 }
237
/* Put the whole ComboPhy (core, then PHY) into reset */
static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
{
	reset_control_assert(cbphy->core_rst);
	reset_control_assert(cbphy->phy_rst);
}
243
/* Release the whole ComboPhy (core, then PHY) from reset */
static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
{
	reset_control_deassert(cbphy->core_rst);
	reset_control_deassert(cbphy->phy_rst);
	/* Delay to ensure reset process is done */
	usleep_range(10, 20);
}
251
252 static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
253 {
254         struct intel_combo_phy *cbphy = iphy->parent;
255         int ret;
256
257         if (!cbphy->init_cnt) {
258                 ret = clk_prepare_enable(cbphy->core_clk);
259                 if (ret) {
260                         dev_err(cbphy->dev, "Clock enable failed!\n");
261                         return ret;
262                 }
263
264                 ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
265                 if (ret) {
266                         dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
267                                 cbphy->clk_rate);
268                         goto clk_err;
269                 }
270
271                 intel_cbphy_rst_assert(cbphy);
272                 intel_cbphy_rst_deassert(cbphy);
273                 ret = intel_cbphy_set_mode(cbphy);
274                 if (ret)
275                         goto clk_err;
276         }
277
278         ret = intel_cbphy_iphy_enable(iphy, true);
279         if (ret) {
280                 dev_err(cbphy->dev, "Failed enabling PHY core\n");
281                 goto clk_err;
282         }
283
284         ret = reset_control_deassert(iphy->app_rst);
285         if (ret) {
286                 dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
287                         COMBO_PHY_ID(iphy), PHY_ID(iphy));
288                 goto clk_err;
289         }
290
291         /* Delay to ensure reset process is done */
292         udelay(1);
293
294         return 0;
295
296 clk_err:
297         clk_disable_unprepare(cbphy->core_clk);
298
299         return ret;
300 }
301
302 static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
303 {
304         struct intel_combo_phy *cbphy = iphy->parent;
305         int ret;
306
307         ret = reset_control_assert(iphy->app_rst);
308         if (ret) {
309                 dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
310                         COMBO_PHY_ID(iphy), PHY_ID(iphy));
311                 return ret;
312         }
313
314         ret = intel_cbphy_iphy_enable(iphy, false);
315         if (ret) {
316                 dev_err(cbphy->dev, "Failed disabling PHY core\n");
317                 return ret;
318         }
319
320         if (cbphy->init_cnt)
321                 return 0;
322
323         clk_disable_unprepare(cbphy->core_clk);
324         intel_cbphy_rst_assert(cbphy);
325
326         return 0;
327 }
328
329 static int intel_cbphy_init(struct phy *phy)
330 {
331         struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
332         struct intel_combo_phy *cbphy = iphy->parent;
333         int ret;
334
335         mutex_lock(&cbphy->lock);
336         ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
337         if (ret)
338                 goto err;
339
340         if (cbphy->phy_mode == PHY_PCIE_MODE) {
341                 ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
342                 if (ret)
343                         goto err;
344         }
345
346         cbphy->init_cnt++;
347
348 err:
349         mutex_unlock(&cbphy->lock);
350
351         return ret;
352 }
353
/*
 * PHY framework .exit callback: disable the PCIe pad refclk (if
 * applicable) and power the lane(s) off.
 */
static int intel_cbphy_exit(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	mutex_lock(&cbphy->lock);
	/*
	 * Drop the refcount before tearing down: the refclk/power-off
	 * helpers only touch the shared pad and clock once init_cnt has
	 * reached zero, i.e. when the last user is exiting.
	 */
	cbphy->init_cnt--;
	if (cbphy->phy_mode == PHY_PCIE_MODE) {
		ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
		if (ret)
			goto err;
	}

	ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);

err:
	mutex_unlock(&cbphy->lock);

	return ret;
}
375
376 static int intel_cbphy_calibrate(struct phy *phy)
377 {
378         struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
379         struct intel_combo_phy *cbphy = iphy->parent;
380         void __iomem *cr_base = cbphy->cr_base;
381         int val, ret, id;
382
383         if (cbphy->phy_mode != PHY_XPCS_MODE)
384                 return 0;
385
386         id = PHY_ID(iphy);
387
388         /* trigger auto RX adaptation */
389         combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
390                                ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
391         /* Wait RX adaptation to finish */
392         ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
393                                  val, val & RX_ADAPT_ACK_BIT, 10, 5000);
394         if (ret)
395                 dev_err(cbphy->dev, "RX Adaptation failed!\n");
396         else
397                 dev_dbg(cbphy->dev, "RX Adaptation success!\n");
398
399         /* Stop RX adaptation */
400         combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
401                                ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));
402
403         return ret;
404 }
405
406 static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
407 {
408         struct device *dev = cbphy->dev;
409         struct platform_device *pdev = to_platform_device(dev);
410         struct fwnode_handle *fwnode = dev_fwnode(dev);
411         struct fwnode_reference_args ref;
412         int ret;
413         u32 val;
414
415         cbphy->core_clk = devm_clk_get(dev, NULL);
416         if (IS_ERR(cbphy->core_clk)) {
417                 ret = PTR_ERR(cbphy->core_clk);
418                 if (ret != -EPROBE_DEFER)
419                         dev_err(dev, "Get clk failed:%d!\n", ret);
420                 return ret;
421         }
422
423         cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
424         if (IS_ERR(cbphy->core_rst)) {
425                 ret = PTR_ERR(cbphy->core_rst);
426                 if (ret != -EPROBE_DEFER)
427                         dev_err(dev, "Get core reset control err: %d!\n", ret);
428                 return ret;
429         }
430
431         cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
432         if (IS_ERR(cbphy->phy_rst)) {
433                 ret = PTR_ERR(cbphy->phy_rst);
434                 if (ret != -EPROBE_DEFER)
435                         dev_err(dev, "Get PHY reset control err: %d!\n", ret);
436                 return ret;
437         }
438
439         cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
440         if (IS_ERR(cbphy->iphy[0].app_rst)) {
441                 ret = PTR_ERR(cbphy->iphy[0].app_rst);
442                 if (ret != -EPROBE_DEFER)
443                         dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
444                 return ret;
445         }
446
447         cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
448         if (IS_ERR(cbphy->iphy[1].app_rst)) {
449                 ret = PTR_ERR(cbphy->iphy[1].app_rst);
450                 if (ret != -EPROBE_DEFER)
451                         dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
452                 return ret;
453         }
454
455         cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
456         if (IS_ERR(cbphy->app_base))
457                 return PTR_ERR(cbphy->app_base);
458
459         cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
460         if (IS_ERR(cbphy->cr_base))
461                 return PTR_ERR(cbphy->cr_base);
462
463         /*
464          * syscfg and hsiocfg variables stores the handle of the registers set
465          * in which ComboPhy subsytem specific registers are subset. Using
466          * Register map framework to access the registers set.
467          */
468         ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
469                                                  1, 0, &ref);
470         if (ret < 0)
471                 return ret;
472
473         cbphy->id = ref.args[0];
474         cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
475         fwnode_handle_put(ref.fwnode);
476
477         ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
478                                                  0, &ref);
479         if (ret < 0)
480                 return ret;
481
482         cbphy->bid = ref.args[0];
483         cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
484         fwnode_handle_put(ref.fwnode);
485
486         ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
487         if (ret)
488                 return ret;
489
490         switch (val) {
491         case PHY_TYPE_PCIE:
492                 cbphy->phy_mode = PHY_PCIE_MODE;
493                 break;
494
495         case PHY_TYPE_SATA:
496                 cbphy->phy_mode = PHY_SATA_MODE;
497                 break;
498
499         case PHY_TYPE_XPCS:
500                 cbphy->phy_mode = PHY_XPCS_MODE;
501                 break;
502
503         default:
504                 dev_err(dev, "Invalid PHY mode: %u\n", val);
505                 return -EINVAL;
506         }
507
508         cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];
509
510         if (fwnode_property_present(fwnode, "intel,aggregation"))
511                 cbphy->aggr_mode = PHY_DL_MODE;
512         else
513                 cbphy->aggr_mode = PHY_SL_MODE;
514
515         return 0;
516 }
517
/* Generic PHY framework callbacks, shared by every lane instance */
static const struct phy_ops intel_cbphy_ops = {
	.init		= intel_cbphy_init,
	.exit		= intel_cbphy_exit,
	.calibrate	= intel_cbphy_calibrate,
	.owner		= THIS_MODULE,
};
524
525 static struct phy *intel_cbphy_xlate(struct device *dev,
526                                      struct of_phandle_args *args)
527 {
528         struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
529         u32 iphy_id;
530
531         if (args->args_count < 1) {
532                 dev_err(dev, "Invalid number of arguments\n");
533                 return ERR_PTR(-EINVAL);
534         }
535
536         iphy_id = args->args[0];
537         if (iphy_id >= PHY_MAX_NUM) {
538                 dev_err(dev, "Invalid phy instance %d\n", iphy_id);
539                 return ERR_PTR(-EINVAL);
540         }
541
542         if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
543                 dev_err(dev, "Invalid. ComboPhy is in Dual lane mode %d\n", iphy_id);
544                 return ERR_PTR(-EINVAL);
545         }
546
547         return cbphy->iphy[iphy_id].phy;
548 }
549
550 static int intel_cbphy_create(struct intel_combo_phy *cbphy)
551 {
552         struct phy_provider *phy_provider;
553         struct device *dev = cbphy->dev;
554         struct intel_cbphy_iphy *iphy;
555         int i;
556
557         for (i = 0; i < PHY_MAX_NUM; i++) {
558                 iphy = &cbphy->iphy[i];
559                 iphy->parent = cbphy;
560                 iphy->id = i;
561
562                 /* In dual lane mode skip phy creation for the second phy */
563                 if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
564                         continue;
565
566                 iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
567                 if (IS_ERR(iphy->phy)) {
568                         dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
569                                 COMBO_PHY_ID(iphy), PHY_ID(iphy));
570
571                         return PTR_ERR(iphy->phy);
572                 }
573
574                 phy_set_drvdata(iphy->phy, iphy);
575         }
576
577         dev_set_drvdata(dev, cbphy);
578         phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
579         if (IS_ERR(phy_provider))
580                 dev_err(dev, "Register PHY provider failed!\n");
581
582         return PTR_ERR_OR_ZERO(phy_provider);
583 }
584
585 static int intel_cbphy_probe(struct platform_device *pdev)
586 {
587         struct device *dev = &pdev->dev;
588         struct intel_combo_phy *cbphy;
589         int ret;
590
591         cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
592         if (!cbphy)
593                 return -ENOMEM;
594
595         cbphy->dev = dev;
596         cbphy->init_cnt = 0;
597         mutex_init(&cbphy->lock);
598         ret = intel_cbphy_fwnode_parse(cbphy);
599         if (ret)
600                 return ret;
601
602         platform_set_drvdata(pdev, cbphy);
603
604         return intel_cbphy_create(cbphy);
605 }
606
/* Platform remove: park the PHY in reset and stop its core clock */
static int intel_cbphy_remove(struct platform_device *pdev)
{
	struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);

	intel_cbphy_rst_assert(cbphy);
	clk_disable_unprepare(cbphy->core_clk);
	return 0;
}
615
/* Devicetree match table */
static const struct of_device_id of_intel_cbphy_match[] = {
	{ .compatible = "intel,combo-phy" },
	{ .compatible = "intel,combophy-lgm" },
	{}
};
621
/* Platform driver glue */
static struct platform_driver intel_cbphy_driver = {
	.probe = intel_cbphy_probe,
	.remove = intel_cbphy_remove,
	.driver = {
		.name = "intel-combo-phy",
		.of_match_table = of_intel_cbphy_match,
	}
};

module_platform_driver(intel_cbphy_driver);

MODULE_DESCRIPTION("Intel Combo-phy driver");
MODULE_LICENSE("GPL v2");