Merge tag 'net-5.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
[linux-2.6-microblaze.git] / drivers / scsi / ufs / ufs-mediatek.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *      Stanley Chu <stanley.chu@mediatek.com>
6  *      Peter Wang <peter.wang@mediatek.com>
7  */
8
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/of.h>
12 #include <linux/of_address.h>
13 #include <linux/of_device.h>
14 #include <linux/phy/phy.h>
15 #include <linux/platform_device.h>
16 #include <linux/regulator/consumer.h>
17 #include <linux/reset.h>
18 #include <linux/sched/clock.h>
19 #include <linux/soc/mediatek/mtk_sip_svc.h>
20
21 #include "ufshcd.h"
22 #include "ufshcd-crypto.h"
23 #include "ufshcd-pltfrm.h"
24 #include "ufs_quirks.h"
25 #include "unipro.h"
26 #include "ufs-mediatek.h"
27
28 #define CREATE_TRACE_POINTS
29 #include "ufs-mediatek-trace.h"
30
31 #define ufs_mtk_smc(cmd, val, res) \
32         arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
33                       cmd, val, 0, 0, 0, 0, 0, &(res))
34
35 #define ufs_mtk_va09_pwr_ctrl(res, on) \
36         ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
37
38 #define ufs_mtk_crypto_ctrl(res, enable) \
39         ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
40
41 #define ufs_mtk_ref_clk_notify(on, res) \
42         ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
43
44 #define ufs_mtk_device_reset_ctrl(high, res) \
45         ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
46
/*
 * Per-device quirk table, matched against the attached UFS device's
 * vendor ID and model string after device identification.
 */
static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	/* All Micron devices: insert an extra delay after entering LPM. */
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	/* This SK hynix part supports the extended-features descriptor. */
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};
54
/*
 * Devicetree match table for the MediaTek UFS host controller.
 * NOTE(review): no MODULE_DEVICE_TABLE(of, ...) is visible in this chunk;
 * confirm it exists later in the file so module autoloading works.
 */
static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
59
60 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
61 {
62         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
63
64         return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
65 }
66
67 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
68 {
69         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
70
71         return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
72 }
73
74 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
75 {
76         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
77
78         return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
79 }
80
81 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
82 {
83         u32 tmp;
84
85         if (enable) {
86                 ufshcd_dme_get(hba,
87                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
88                 tmp = tmp |
89                       (1 << RX_SYMBOL_CLK_GATE_EN) |
90                       (1 << SYS_CLK_GATE_EN) |
91                       (1 << TX_CLK_GATE_EN);
92                 ufshcd_dme_set(hba,
93                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
94
95                 ufshcd_dme_get(hba,
96                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
97                 tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
98                 ufshcd_dme_set(hba,
99                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
100         } else {
101                 ufshcd_dme_get(hba,
102                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
103                 tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
104                               (1 << SYS_CLK_GATE_EN) |
105                               (1 << TX_CLK_GATE_EN));
106                 ufshcd_dme_set(hba,
107                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
108
109                 ufshcd_dme_get(hba,
110                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
111                 tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
112                 ufshcd_dme_set(hba,
113                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
114         }
115 }
116
117 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
118 {
119         struct arm_smccc_res res;
120
121         ufs_mtk_crypto_ctrl(res, 1);
122         if (res.a0) {
123                 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
124                          __func__, res.a0);
125                 hba->caps &= ~UFSHCD_CAP_CRYPTO;
126         }
127 }
128
/*
 * Hard-reset the host: assert the HCI, crypto and UniPro reset lines
 * together, hold them low for at least 100 us, then release in reverse
 * order. Missing reset lines are NULL (see ufs_mtk_init_reset_control())
 * and reset_control_*() treats NULL as a no-op.
 */
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	/* Release in the reverse order of assertion. */
	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}
143
144 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
145                                        struct reset_control **rc,
146                                        char *str)
147 {
148         *rc = devm_reset_control_get(hba->dev, str);
149         if (IS_ERR(*rc)) {
150                 dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
151                          str, PTR_ERR(*rc));
152                 *rc = NULL;
153         }
154 }
155
156 static void ufs_mtk_init_reset(struct ufs_hba *hba)
157 {
158         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
159
160         ufs_mtk_init_reset_control(hba, &host->hci_reset,
161                                    "hci_rst");
162         ufs_mtk_init_reset_control(hba, &host->unipro_reset,
163                                    "unipro_rst");
164         ufs_mtk_init_reset_control(hba, &host->crypto_reset,
165                                    "crypto_rst");
166 }
167
/*
 * Vendor hook around host controller enable.
 *
 * PRE_CHANGE only: pick the HCE enable delay (no delay needed when UniPro
 * is already in LPM, otherwise hard-reset the host and allow 600 us),
 * (re-)enable the inline crypto engine if advertised, and — when AH8 is
 * platform-disabled — clear the auto-hibern8 timer and capability so core
 * code never arms it.
 */
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	unsigned long flags;

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			/* Timer write is done under host_lock like core code does. */
			spin_lock_irqsave(hba->host->host_lock, flags);
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			spin_unlock_irqrestore(hba->host->host_lock,
					       flags);

			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}
	}

	return 0;
}
199
/*
 * Bind the M-PHY described in the devicetree to this host.
 *
 * Returns -EPROBE_DEFER when the PHY driver has not probed yet, 0 on
 * success or when no PHY is described (-ENODEV is tolerated because not
 * every platform needs explicit M-PHY control), other negative errno on
 * lookup failure. host->mphy is NULL whenever binding did not succeed.
 */
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	/* Never leave an ERR_PTR in host->mphy; callers test it for NULL. */
	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}
237
/*
 * Request or release the device reference clock and wait for hardware ack.
 *
 * When gating (off), waits ref_clk_gating_wait_us first so the device has
 * quiesced. The REFCLK_CTRL register implements a req/ack handshake: the
 * ack bit (bit 1) must match the request bit (bit 0) within
 * REFCLK_REQ_TIMEOUT_US. On timeout the SMC notification is rolled back to
 * the previous state and -ETIMEDOUT is returned. When ungating, waits
 * ref_clk_ungating_wait_us after the ack before use.
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	/* Nothing to do if already in the requested state. */
	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		/* Notify secure world before requesting the clock. */
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* Roll the SMC notification back to the still-current state. */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
	else
		/* Notify secure world only after the clock is released. */
		ufs_mtk_ref_clk_notify(on, res);

	return 0;
}
284
285 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
286                                           u16 gating_us)
287 {
288         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
289
290         if (hba->dev_info.clk_gating_wait_us) {
291                 host->ref_clk_gating_wait_us =
292                         hba->dev_info.clk_gating_wait_us;
293         } else {
294                 host->ref_clk_gating_wait_us = gating_us;
295         }
296
297         host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
298 }
299
/*
 * Program the debug-signal multiplexer so REG_UFS_PROBE exposes the link
 * state machine. IP versions >= 0x36 (minor field of ip_ver) use a wider
 * mux with four bank-select registers; the magic values come from the
 * MediaTek IP specification and are opaque here — do not alter them.
 */
static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}
314
/*
 * Poll the link state machine (low 5 bits of REG_UFS_PROBE) until it
 * leaves the hibern8 entry/exit range and returns to the idle base state,
 * or retry_ms elapses. Uses ktime_get_mono_fast_ns() because this runs on
 * the suspend path where plain ktime_get() is not reliable. A timeout is
 * only logged, not returned — callers proceed regardless.
 */
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
			    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a specific time after check base */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * if state is in H8 enter and H8 enter confirm
		 * wait until return to idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			/* Never saw an H8 transition: state is stable, done. */
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}
354
/*
 * Poll the link state field (top 4 bits of REG_UFS_PROBE) until it equals
 * @state or @max_wait_ms elapses. A final re-check after the loop covers
 * the race where the state changed between the last read and the timeout
 * test. Returns 0 on match, -ETIMEDOUT otherwise.
 */
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	/* Last-chance check: the state may have flipped just before timeout. */
	if (val == state)
		return 0;

	return -ETIMEDOUT;
}
380
/*
 * Power the M-PHY on or off, tracking state in host->mphy_powered_on.
 *
 * Power-on order: VA09 regulator (if supported) -> 200 us settle -> SMC
 * notification -> phy_power_on(). Power-off is the exact reverse. No-op
 * when there is no bound M-PHY or the PHY is already in the target state.
 * Returns 0 on success or the regulator error code.
 */
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	/* XOR: skip unless the requested state differs from the current one. */
	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stablize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}
422
/*
 * Fetch a named clock with devm lifetime. Stores it in *clk_out on
 * success; *clk_out is left untouched on failure. Returns 0 or the
 * PTR_ERR from devm_clk_get().
 */
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*clk_out = clk;
	return 0;
}
437
/*
 * Boost or relax vcore voltage and the crypto engine clock for inline
 * encryption performance.
 *
 * Boost: raise vcore first, then switch the crypt mux to the performance
 * parent (reverting vcore if the reparent fails). Relax: switch to the
 * low-power parent first, then drop the vcore floor — the reverse order,
 * so the engine is never clocked fast at low voltage. Errors are logged
 * only; this is a best-effort optimization. No-op unless the boost-crypt
 * capability was initialized.
 */
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	/* The mux must be running while its parent is switched. */
	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			/* Undo the voltage boost since the clock stays slow. */
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}
493
494 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
495                                  struct clk **clk)
496 {
497         int ret;
498
499         ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
500         if (ret) {
501                 dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
502                          name, ret);
503         }
504
505         return ret;
506 }
507
508 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
509 {
510         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
511         struct ufs_mtk_crypt_cfg *cfg;
512         struct device *dev = hba->dev;
513         struct regulator *reg;
514         u32 volt;
515
516         host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
517                                    GFP_KERNEL);
518         if (!host->crypt)
519                 goto disable_caps;
520
521         reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
522         if (IS_ERR(reg)) {
523                 dev_info(dev, "failed to get dvfsrc-vcore: %ld",
524                          PTR_ERR(reg));
525                 goto disable_caps;
526         }
527
528         if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
529                                  &volt)) {
530                 dev_info(dev, "failed to get boost-crypt-vcore-min");
531                 goto disable_caps;
532         }
533
534         cfg = host->crypt;
535         if (ufs_mtk_init_host_clk(hba, "crypt_mux",
536                                   &cfg->clk_crypt_mux))
537                 goto disable_caps;
538
539         if (ufs_mtk_init_host_clk(hba, "crypt_lp",
540                                   &cfg->clk_crypt_lp))
541                 goto disable_caps;
542
543         if (ufs_mtk_init_host_clk(hba, "crypt_perf",
544                                   &cfg->clk_crypt_perf))
545                 goto disable_caps;
546
547         cfg->reg_vcore = reg;
548         cfg->vcore_volt = volt;
549         host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
550
551 disable_caps:
552         return;
553 }
554
555 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
556 {
557         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
558
559         host->reg_va09 = regulator_get(hba->dev, "va09");
560         if (!host->reg_va09)
561                 dev_info(hba->dev, "failed to get va09");
562         else
563                 host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
564 }
565
566 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
567 {
568         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
569         struct device_node *np = hba->dev->of_node;
570
571         if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
572                 ufs_mtk_init_boost_crypt(hba);
573
574         if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
575                 ufs_mtk_init_va09_pwr_ctrl(hba);
576
577         if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
578                 host->caps |= UFS_MTK_CAP_DISABLE_AH8;
579
580         if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
581                 host->caps |= UFS_MTK_CAP_BROKEN_VCC;
582
583         dev_info(hba->dev, "caps: 0x%x", host->caps);
584 }
585
/*
 * Scale performance-related resources together: crypto boost, reference
 * clock and M-PHY power. The same order is used for both directions here;
 * callers gate the whole sequence on link state (see
 * ufs_mtk_setup_clocks()).
 */
static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_boost_crypt(hba, up);
	ufs_mtk_setup_ref_clk(hba, up);

	if (up)
		phy_power_on(host->mphy);
	else
		phy_power_off(host->mphy);
}
598
/**
 * ufs_mtk_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Scales down perf (refclk, crypto boost, M-PHY) before clocks are gated
 * and scales up after clocks are re-enabled — but only when the link is
 * OFF or (about to be) in Hibern8, since the refclk and M-PHY must stay
 * up while the link is active.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_scale_perf(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_scale_perf(hba, true);
	}

	return ret;
}
648
649 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
650 {
651         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
652         int ret, ver = 0;
653
654         if (host->hw_ver.major)
655                 return;
656
657         /* Set default (minimum) version anyway */
658         host->hw_ver.major = 2;
659
660         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
661         if (!ret) {
662                 if (ver >= UFS_UNIPRO_VER_1_8) {
663                         host->hw_ver.major = 3;
664                         /*
665                          * Fix HCI version for some platforms with
666                          * incorrect version
667                          */
668                         if (hba->ufs_version < ufshci_version(3, 0))
669                                 hba->ufs_version = ufshci_version(3, 0);
670                 }
671         }
672 }
673
/*
 * Vendor hook: report the HCI version. Returns the cached (and possibly
 * fixed-up, see ufs_mtk_get_controller_version()) value instead of the
 * raw register.
 */
static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}
678
/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	/* Must be set before any helper that calls ufshcd_get_variant(). */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
757
758 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
759                                   struct ufs_pa_layer_attr *dev_max_params,
760                                   struct ufs_pa_layer_attr *dev_req_params)
761 {
762         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
763         struct ufs_dev_params host_cap;
764         int ret;
765
766         ufshcd_init_pwr_dev_param(&host_cap);
767         host_cap.hs_rx_gear = UFS_HS_G4;
768         host_cap.hs_tx_gear = UFS_HS_G4;
769
770         ret = ufshcd_get_pwr_dev_param(&host_cap,
771                                        dev_max_params,
772                                        dev_req_params);
773         if (ret) {
774                 pr_info("%s: failed to determine capabilities\n",
775                         __func__);
776         }
777
778         if (host->hw_ver.major >= 3) {
779                 ret = ufshcd_dme_configure_adapt(hba,
780                                            dev_req_params->gear_tx,
781                                            PA_INITIAL_ADAPT);
782         }
783
784         return ret;
785 }
786
787 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
788                                      enum ufs_notify_change_status stage,
789                                      struct ufs_pa_layer_attr *dev_max_params,
790                                      struct ufs_pa_layer_attr *dev_req_params)
791 {
792         int ret = 0;
793
794         switch (stage) {
795         case PRE_CHANGE:
796                 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
797                                              dev_req_params);
798                 break;
799         case POST_CHANGE:
800                 break;
801         default:
802                 ret = -EINVAL;
803                 break;
804         }
805
806         return ret;
807 }
808
/*
 * Enter or leave UniPro low-power mode via the vendor power-down control
 * attribute. host->unipro_lpm tracks the resulting state; it is also
 * forced to non-LPM when a "leave LPM" command fails (see comment below)
 * so ufs_mtk_hce_enable_notify() picks the full re-enable delay.
 */
static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly set as non-LPM mode if UIC commands is failed
		 * to use default hba_enable_delay_us value for re-enabling
		 * the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}
828
/*
 * PRE_CHANGE link-startup hook: detect the controller generation, make
 * sure UniPro is out of LPM, disable host TX LCC and clear the deep-stall
 * bit in the vendor save-power control register.
 */
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	/* Bit 6 is the deep-stall enable in VS_SAVEPOWERCONTROL. */
	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}
860
/*
 * Set the clock-gating delay so it is always longer than the auto-hibern8
 * idle timer (+5 ms margin) — the link should enter H8 before clocks are
 * gated. Falls back to 10 ms when AH8 is unsupported or unset.
 * NOTE(review): ah_ms takes the AH8 timer field without applying its
 * scale factor — confirm whether the raw value is intended here.
 */
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	unsigned long flags;
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		/* delay_ms is read under host_lock by the gating work. */
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.delay_ms = ah_ms + 5;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}
877
/*
 * POST_CHANGE link-startup hook: turn on UniPro clock gating, program the
 * default auto-hibern8 timer (10 units at scale code 3) and derive the
 * clock-gating delay from it.
 */
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}
892
893 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
894                                        enum ufs_notify_change_status stage)
895 {
896         int ret = 0;
897
898         switch (stage) {
899         case PRE_CHANGE:
900                 ret = ufs_mtk_pre_link(hba);
901                 break;
902         case POST_CHANGE:
903                 ret = ufs_mtk_post_link(hba);
904                 break;
905         default:
906                 ret = -EINVAL;
907                 break;
908         }
909
910         return ret;
911 }
912
/*
 * ufs_mtk_device_reset - toggle the device RST_n line via secure-world SMC
 * @hba: host controller instance
 *
 * The RST_n pad is controlled by secure firmware, so each level change is
 * issued through ufs_mtk_device_reset_ctrl() (an arm_smccc_smc wrapper).
 * Always returns 0.
 */
static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	/* Drive RST_n low to assert the reset. */
	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	/* Release the reset by driving RST_n high again. */
	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}
940
941 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
942 {
943         int err;
944
945         err = ufshcd_hba_enable(hba);
946         if (err)
947                 return err;
948
949         err = ufs_mtk_unipro_set_lpm(hba, false);
950         if (err)
951                 return err;
952
953         err = ufshcd_uic_hibern8_exit(hba);
954         if (!err)
955                 ufshcd_set_link_active(hba);
956         else
957                 return err;
958
959         err = ufshcd_make_hba_operational(hba);
960         if (err)
961                 return err;
962
963         return 0;
964 }
965
966 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
967 {
968         int err;
969
970         err = ufs_mtk_unipro_set_lpm(hba, true);
971         if (err) {
972                 /* Resume UniPro state for following error recovery */
973                 ufs_mtk_unipro_set_lpm(hba, false);
974                 return err;
975         }
976
977         return 0;
978 }
979
980 static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
981 {
982         if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
983                 return;
984
985         if (lpm && !hba->vreg_info.vcc->enabled)
986                 regulator_set_mode(hba->vreg_info.vccq2->reg,
987                                    REGULATOR_MODE_IDLE);
988         else if (!lpm)
989                 regulator_set_mode(hba->vreg_info.vccq2->reg,
990                                    REGULATOR_MODE_NORMAL);
991 }
992
993 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
994 {
995         unsigned long flags;
996         int ret;
997
998         /* disable auto-hibern8 */
999         spin_lock_irqsave(hba->host->host_lock, flags);
1000         ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1001         spin_unlock_irqrestore(hba->host->host_lock, flags);
1002
1003         /* wait host return to idle state when auto-hibern8 off */
1004         ufs_mtk_wait_idle_state(hba, 5);
1005
1006         ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1007         if (ret)
1008                 dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1009 }
1010
/*
 * ufs_mtk_suspend - vendor suspend hook
 * @hba: host controller instance
 * @pm_op: runtime vs. system PM operation (unused here)
 * @status: PRE_CHANGE or POST_CHANGE stage of the suspend sequence
 *
 * PRE_CHANGE only disables auto-hibern8; the real power handling happens
 * at POST_CHANGE: put the link into low-power mode when it is in hibern8,
 * drop vreg/MPHY power when the link is no longer active, and keep the
 * device reset asserted when the link is fully off.
 *
 * Return: 0 on success, -EAGAIN after forcing the link off on failure.
 */
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (!ufshcd_is_auto_hibern8_supported(hba))
			return 0;
		ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	return 0;
fail:
	/*
	 * Set link as off state enforcedly to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for completed host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}
1055
1056 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1057 {
1058         int err;
1059
1060         err = ufs_mtk_mphy_power_on(hba, true);
1061         if (err)
1062                 goto fail;
1063
1064         ufs_mtk_vreg_set_lpm(hba, false);
1065
1066         if (ufshcd_is_link_hibern8(hba)) {
1067                 err = ufs_mtk_link_set_hpm(hba);
1068                 if (err)
1069                         goto fail;
1070         }
1071
1072         return 0;
1073 fail:
1074         return ufshcd_link_recovery(hba);
1075 }
1076
/*
 * ufs_mtk_dbg_register_dump - dump vendor-specific registers for debugging
 * @hba: host controller instance
 *
 * Prints the reference-clock control register, the extended register, the
 * MPHY control window and a debug probe sample to the kernel log.
 */
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
1091
1092 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1093 {
1094         struct ufs_dev_info *dev_info = &hba->dev_info;
1095         u16 mid = dev_info->wmanufacturerid;
1096
1097         if (mid == UFS_VENDOR_SAMSUNG)
1098                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1099
1100         /*
1101          * Decide waiting time before gating reference clock and
1102          * after ungating reference clock according to vendors'
1103          * requirements.
1104          */
1105         if (mid == UFS_VENDOR_SAMSUNG)
1106                 ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1107         else if (mid == UFS_VENDOR_SKHYNIX)
1108                 ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1109         else if (mid == UFS_VENDOR_TOSHIBA)
1110                 ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1111         else
1112                 ufs_mtk_setup_ref_clk_wait_us(hba,
1113                                               REFCLK_DEFAULT_WAIT_US);
1114
1115         return 0;
1116 }
1117
1118 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1119 {
1120         ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1121
1122         if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1123             (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1124                 hba->vreg_info.vcc->always_on = true;
1125                 /*
1126                  * VCC will be kept always-on thus we don't
1127                  * need any delay during regulator operations
1128                  */
1129                 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1130                         UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1131         }
1132 }
1133
1134 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1135                                  enum ufs_event_type evt, void *data)
1136 {
1137         unsigned int val = *(u32 *)data;
1138
1139         trace_ufs_mtk_event(evt, val);
1140 }
1141
1142 /*
1143  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1144  *
1145  * The variant operations configure the necessary controller and PHY
1146  * handshake during initialization.
1147  */
1148 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
1149         .name                = "mediatek.ufshci",
1150         .init                = ufs_mtk_init,
1151         .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
1152         .setup_clocks        = ufs_mtk_setup_clocks,
1153         .hce_enable_notify   = ufs_mtk_hce_enable_notify,
1154         .link_startup_notify = ufs_mtk_link_startup_notify,
1155         .pwr_change_notify   = ufs_mtk_pwr_change_notify,
1156         .apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
1157         .fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
1158         .suspend             = ufs_mtk_suspend,
1159         .resume              = ufs_mtk_resume,
1160         .dbg_register_dump   = ufs_mtk_dbg_register_dump,
1161         .device_reset        = ufs_mtk_device_reset,
1162         .event_notify        = ufs_mtk_event_notify,
1163 };
1164
1165 /**
1166  * ufs_mtk_probe - probe routine of the driver
1167  * @pdev: pointer to Platform device handle
1168  *
1169  * Return zero for success and non-zero for failure
1170  */
1171 static int ufs_mtk_probe(struct platform_device *pdev)
1172 {
1173         int err;
1174         struct device *dev = &pdev->dev;
1175         struct device_node *reset_node;
1176         struct platform_device *reset_pdev;
1177         struct device_link *link;
1178
1179         reset_node = of_find_compatible_node(NULL, NULL,
1180                                              "ti,syscon-reset");
1181         if (!reset_node) {
1182                 dev_notice(dev, "find ti,syscon-reset fail\n");
1183                 goto skip_reset;
1184         }
1185         reset_pdev = of_find_device_by_node(reset_node);
1186         if (!reset_pdev) {
1187                 dev_notice(dev, "find reset_pdev fail\n");
1188                 goto skip_reset;
1189         }
1190         link = device_link_add(dev, &reset_pdev->dev,
1191                 DL_FLAG_AUTOPROBE_CONSUMER);
1192         put_device(&reset_pdev->dev);
1193         if (!link) {
1194                 dev_notice(dev, "add reset device_link fail\n");
1195                 goto skip_reset;
1196         }
1197         /* supplier is not probed */
1198         if (link->status == DL_STATE_DORMANT) {
1199                 err = -EPROBE_DEFER;
1200                 goto out;
1201         }
1202
1203 skip_reset:
1204         /* perform generic probe */
1205         err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1206
1207 out:
1208         if (err)
1209                 dev_info(dev, "probe failed %d\n", err);
1210
1211         of_node_put(reset_node);
1212         return err;
1213 }
1214
1215 /**
1216  * ufs_mtk_remove - set driver_data of the device to NULL
1217  * @pdev: pointer to platform device handle
1218  *
1219  * Always return 0
1220  */
1221 static int ufs_mtk_remove(struct platform_device *pdev)
1222 {
1223         struct ufs_hba *hba =  platform_get_drvdata(pdev);
1224
1225         pm_runtime_get_sync(&(pdev)->dev);
1226         ufshcd_remove(hba);
1227         return 0;
1228 }
1229
/* PM callbacks: system sleep and runtime PM are handled by ufshcd core. */
static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};
1236
/* Platform driver glue; matched against the DT compatibles above. */
static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};
1247
/* Module metadata and registration. */
MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);