Merge tag 's390-5.11-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
[linux-2.6-microblaze.git] / drivers / scsi / ufs / ufs-mediatek.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *      Stanley Chu <stanley.chu@mediatek.com>
6  *      Peter Wang <peter.wang@mediatek.com>
7  */
8
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/of.h>
12 #include <linux/of_address.h>
13 #include <linux/of_device.h>
14 #include <linux/phy/phy.h>
15 #include <linux/platform_device.h>
16 #include <linux/regulator/consumer.h>
17 #include <linux/reset.h>
18 #include <linux/soc/mediatek/mtk_sip_svc.h>
19
20 #include "ufshcd.h"
21 #include "ufshcd-crypto.h"
22 #include "ufshcd-pltfrm.h"
23 #include "ufs_quirks.h"
24 #include "unipro.h"
25 #include "ufs-mediatek.h"
26
27 #define CREATE_TRACE_POINTS
28 #include "ufs-mediatek-trace.h"
29
30 #define ufs_mtk_smc(cmd, val, res) \
31         arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
32                       cmd, val, 0, 0, 0, 0, 0, &(res))
33
34 #define ufs_mtk_va09_pwr_ctrl(res, on) \
35         ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
36
37 #define ufs_mtk_crypto_ctrl(res, enable) \
38         ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
39
40 #define ufs_mtk_ref_clk_notify(on, res) \
41         ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
42
43 #define ufs_mtk_device_reset_ctrl(high, res) \
44         ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
45
/* Per-device quirks applied by the core after device identification. */
static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	/* All Micron parts: extra delay required after entering LPM. */
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	/* This SK hynix part supports the extended-features descriptor. */
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};
53
/* Device-tree compatibles bound to this host driver. */
static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
58
59 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
60 {
61         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
62
63         return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
64 }
65
66 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
67 {
68         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
69
70         return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
71 }
72
73 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
74 {
75         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
76
77         return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
78 }
79
80 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
81 {
82         u32 tmp;
83
84         if (enable) {
85                 ufshcd_dme_get(hba,
86                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
87                 tmp = tmp |
88                       (1 << RX_SYMBOL_CLK_GATE_EN) |
89                       (1 << SYS_CLK_GATE_EN) |
90                       (1 << TX_CLK_GATE_EN);
91                 ufshcd_dme_set(hba,
92                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
93
94                 ufshcd_dme_get(hba,
95                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
96                 tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
97                 ufshcd_dme_set(hba,
98                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
99         } else {
100                 ufshcd_dme_get(hba,
101                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
102                 tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
103                               (1 << SYS_CLK_GATE_EN) |
104                               (1 << TX_CLK_GATE_EN));
105                 ufshcd_dme_set(hba,
106                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
107
108                 ufshcd_dme_get(hba,
109                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
110                 tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
111                 ufshcd_dme_set(hba,
112                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
113         }
114 }
115
/*
 * Enable the inline crypto engine via a secure monitor call. If the SMC
 * reports failure, clear UFSHCD_CAP_CRYPTO so the core stops using the
 * crypto path.
 */
static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}
127
/*
 * Hard-reset the host: assert HCI, crypto and UniPro reset lines, hold
 * them briefly, then release in the reverse order.
 */
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	/* Keep all lines asserted for at least 100us. */
	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}
142
143 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
144                                        struct reset_control **rc,
145                                        char *str)
146 {
147         *rc = devm_reset_control_get(hba->dev, str);
148         if (IS_ERR(*rc)) {
149                 dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
150                          str, PTR_ERR(*rc));
151                 *rc = NULL;
152         }
153 }
154
155 static void ufs_mtk_init_reset(struct ufs_hba *hba)
156 {
157         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
158
159         ufs_mtk_init_reset_control(hba, &host->hci_reset,
160                                    "hci_rst");
161         ufs_mtk_init_reset_control(hba, &host->unipro_reset,
162                                    "unipro_rst");
163         ufs_mtk_init_reset_control(hba, &host->crypto_reset,
164                                    "crypto_rst");
165 }
166
/*
 * Vendor hook around host-controller enable. Only PRE_CHANGE is acted
 * on: pick the HCE enable delay (skipping the host reset when UniPro is
 * already in low-power mode), re-enable the inline crypto engine, and
 * optionally strip Auto-Hibern8 support.
 */
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	unsigned long flags;

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			/* No reset needed; enable completes immediately. */
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			/* Stop the AH8 idle timer and hide the capability. */
			spin_lock_irqsave(hba->host->host_lock, flags);
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			spin_unlock_irqrestore(hba->host->host_lock,
					       flags);

			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}
	}

	return 0;
}
198
/*
 * Bind the external M-PHY (phy index 0 in the device-tree node).
 * -EPROBE_DEFER is propagated so probe retries after the phy driver
 * loads; a missing phy (-ENODEV) is tolerated with host->mphy == NULL.
 */
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	/* Never leave an ERR_PTR behind in host->mphy. */
	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}
236
/*
 * Request or release the device reference clock and wait for the
 * hardware acknowledgement. The secure monitor is notified before
 * ungating and after gating; on a missing ack the notification is
 * rolled back to the previous state and -ETIMEDOUT is returned.
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	/* Already in the requested state: nothing to do. */
	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* Roll the notification back to the last known-good state. */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		/* Let the clock settle before telling the secure monitor. */
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}
283
284 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
285                                           u16 gating_us, u16 ungating_us)
286 {
287         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
288
289         if (hba->dev_info.clk_gating_wait_us) {
290                 host->ref_clk_gating_wait_us =
291                         hba->dev_info.clk_gating_wait_us;
292         } else {
293                 host->ref_clk_gating_wait_us = gating_us;
294         }
295
296         host->ref_clk_ungating_wait_us = ungating_us;
297 }
298
299 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
300                                    unsigned long max_wait_ms)
301 {
302         ktime_t timeout, time_checked;
303         u32 val;
304
305         timeout = ktime_add_ms(ktime_get(), max_wait_ms);
306         do {
307                 time_checked = ktime_get();
308                 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
309                 val = ufshcd_readl(hba, REG_UFS_PROBE);
310                 val = val >> 28;
311
312                 if (val == state)
313                         return 0;
314
315                 /* Sleep for max. 200us */
316                 usleep_range(100, 200);
317         } while (ktime_before(time_checked, timeout));
318
319         if (val == state)
320                 return 0;
321
322         return -ETIMEDOUT;
323 }
324
/*
 * Power the M-PHY on or off, handling the VA09 regulator and its secure
 * monitor notification when the platform supports it. No-op if there is
 * no bound mphy or the requested state is already current.
 */
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	/* on ^ mphy_powered_on is false when already in the target state. */
	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stablize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		/* Reverse order of the power-on path. */
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}
366
/*
 * Fetch a named clock for @dev. *clk_out is written only on success;
 * on failure the devm_clk_get() error code is returned.
 */
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*clk_out = clk;
	return 0;
}
381
/*
 * Boost (or unboost) the crypto engine: raise vcore and switch the
 * crypto clock mux to the performance parent, or revert both. The mux
 * clock is kept enabled only for the duration of the reparenting. All
 * failures are logged and best-effort unwound; nothing is returned.
 */
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		/* Raise vcore first so the faster parent is safe to use. */
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			/* Undo the voltage raise on reparent failure. */
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		/* Drop to the low-power parent before lowering vcore. */
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}
437
438 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
439                                  struct clk **clk)
440 {
441         int ret;
442
443         ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
444         if (ret) {
445                 dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
446                          name, ret);
447         }
448
449         return ret;
450 }
451
/*
 * Probe the optional crypto-boost resources (dvfsrc-vcore regulator,
 * boost voltage from DT, and the three crypt clocks). The capability
 * bit is set only when everything is found; any failure simply leaves
 * the capability off — the "disable_caps" label has nothing to undo
 * because the bit is set last.
 */
static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	/* All resources present: advertise the capability. */
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}
498
499 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
500 {
501         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
502
503         host->reg_va09 = regulator_get(hba->dev, "va09");
504         if (!host->reg_va09)
505                 dev_info(hba->dev, "failed to get va09");
506         else
507                 host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
508 }
509
/*
 * Parse the MediaTek-specific device-tree properties and set the
 * corresponding host capability bits (crypto boost, VA09 control,
 * Auto-Hibern8 disable, broken VCC).
 */
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}
529
530 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
531 {
532         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
533
534         ufs_mtk_boost_crypt(hba, up);
535         ufs_mtk_setup_ref_clk(hba, up);
536
537         if (up)
538                 phy_power_on(host->mphy);
539         else
540                 phy_power_off(host->mphy);
541 }
542
/**
 * ufs_mtk_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Scales host resources down before the clocks are gated (PRE_CHANGE of
 * a disable) and back up after they are ungated (POST_CHANGE of an
 * enable). Power-off is attempted only when the link is off or safely
 * parked in Hibern8.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_scale_perf(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_scale_perf(hba, true);
	}

	return ret;
}
592
593 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
594 {
595         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
596         int ret, ver = 0;
597
598         if (host->hw_ver.major)
599                 return;
600
601         /* Set default (minimum) version anyway */
602         host->hw_ver.major = 2;
603
604         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
605         if (!ret) {
606                 if (ver >= UFS_UNIPRO_VER_1_8)
607                         host->hw_ver.major = 3;
608         }
609 }
610
/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/* Sanity check: this driver should only bind to matched nodes. */
	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
686
/*
 * Negotiate power-mode parameters before a power change: cap the host
 * at HS-G4 and, on IP major >= 3, program the initial ADAPT for the
 * negotiated TX gear.
 */
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G4;
	host_cap.hs_tx_gear = UFS_HS_G4;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
					   dev_req_params->gear_tx,
					   PA_INITIAL_ADAPT);
	}

	return ret;
}
715
716 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
717                                      enum ufs_notify_change_status stage,
718                                      struct ufs_pa_layer_attr *dev_max_params,
719                                      struct ufs_pa_layer_attr *dev_req_params)
720 {
721         int ret = 0;
722
723         switch (stage) {
724         case PRE_CHANGE:
725                 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
726                                              dev_req_params);
727                 break;
728         case POST_CHANGE:
729                 break;
730         default:
731                 ret = -EINVAL;
732                 break;
733         }
734
735         return ret;
736 }
737
/*
 * Enter or leave UniPro low-power mode via the vendor power-down
 * control attribute, tracking the result in host->unipro_lpm so the
 * HCE-enable path knows whether a host reset is needed.
 */
static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly set as non-LPM mode if UIC commands is failed
		 * to use default hba_enable_delay_us value for re-enabling
		 * the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}
757
/*
 * Pre-link-startup setup: detect the controller version, leave UniPro
 * LPM, disable TX LCC and clear the deep-stall bit in the vendor
 * save-power control attribute.
 */
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	/* Bit 6 of VS_SAVEPOWERCONTROL is the deep-stall enable. */
	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}
789
/*
 * Set the clock-gating delay a little longer than the Auto-Hibern8
 * idle time (or 10ms when AH8 is unavailable) so gating never races
 * with the AH8 timer.
 */
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	unsigned long flags;
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.delay_ms = ah_ms + 5;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}
806
/*
 * Post-link-startup setup: turn on UniPro clock gating, program the
 * Auto-Hibern8 timer, and derive the clock-gating delay from it.
 */
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* configure auto-hibern8 timer to 10ms */
	if (ufshcd_is_auto_hibern8_supported(hba)) {
		ufshcd_auto_hibern8_update(hba,
			FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
	}

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}
823
824 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
825                                        enum ufs_notify_change_status stage)
826 {
827         int ret = 0;
828
829         switch (stage) {
830         case PRE_CHANGE:
831                 ret = ufs_mtk_pre_link(hba);
832                 break;
833         case POST_CHANGE:
834                 ret = ufs_mtk_post_link(hba);
835                 break;
836         default:
837                 ret = -EINVAL;
838                 break;
839         }
840
841         return ret;
842 }
843
/*
 * Toggle the device RST_n line through the secure monitor: pull it low,
 * hold, release, then give the device time to come out of reset.
 */
static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}
868
869 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
870 {
871         int err;
872
873         err = ufshcd_hba_enable(hba);
874         if (err)
875                 return err;
876
877         err = ufs_mtk_unipro_set_lpm(hba, false);
878         if (err)
879                 return err;
880
881         err = ufshcd_uic_hibern8_exit(hba);
882         if (!err)
883                 ufshcd_set_link_active(hba);
884         else
885                 return err;
886
887         err = ufshcd_make_hba_operational(hba);
888         if (err)
889                 return err;
890
891         return 0;
892 }
893
894 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
895 {
896         int err;
897
898         err = ufs_mtk_unipro_set_lpm(hba, true);
899         if (err) {
900                 /* Resume UniPro state for following error recovery */
901                 ufs_mtk_unipro_set_lpm(hba, false);
902                 return err;
903         }
904
905         return 0;
906 }
907
908 static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
909 {
910         if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
911                 return;
912
913         if (lpm & !hba->vreg_info.vcc->enabled)
914                 regulator_set_mode(hba->vreg_info.vccq2->reg,
915                                    REGULATOR_MODE_IDLE);
916         else if (!lpm)
917                 regulator_set_mode(hba->vreg_info.vccq2->reg,
918                                    REGULATOR_MODE_NORMAL);
919 }
920
/*
 * Vendor suspend hook: park the link in LPM when it is in hibern8,
 * then, once the link is inactive, drop the regulators to low power
 * and power off the M-PHY. Any failure forces the link off and returns
 * -EAGAIN so the core performs a full host reset-and-restore.
 */
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	return 0;
fail:
	/*
	 * Set link as off state enforcedly to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for completed host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}
953
954 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
955 {
956         int err;
957
958         err = ufs_mtk_mphy_power_on(hba, true);
959         if (err)
960                 goto fail;
961
962         ufs_mtk_vreg_set_lpm(hba, false);
963
964         if (ufshcd_is_link_hibern8(hba)) {
965                 err = ufs_mtk_link_set_hpm(hba);
966                 if (err)
967                         goto fail;
968         }
969
970         return 0;
971 fail:
972         return ufshcd_link_recovery(hba);
973 }
974
/*
 * ufs_mtk_dbg_register_dump - dump MediaTek-specific UFSHCI registers
 * @hba: per-adapter instance
 *
 * Emits the vendor register regions (reference-clock control, extended
 * register, M-PHY control through reject monitor) and one selected debug
 * probe value for post-mortem analysis.
 */
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

        ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

        /* Dump the whole span from MPHYCTRL up to and including REJECT_MON */
        ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
                         REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
                         "MPHY Ctrl ");

        /* Direct debugging information to REG_MTK_PROBE */
        ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
989
990 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
991 {
992         struct ufs_dev_info *dev_info = &hba->dev_info;
993         u16 mid = dev_info->wmanufacturerid;
994
995         if (mid == UFS_VENDOR_SAMSUNG)
996                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
997
998         /*
999          * Decide waiting time before gating reference clock and
1000          * after ungating reference clock according to vendors'
1001          * requirements.
1002          */
1003         if (mid == UFS_VENDOR_SAMSUNG)
1004                 ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
1005         else if (mid == UFS_VENDOR_SKHYNIX)
1006                 ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
1007         else if (mid == UFS_VENDOR_TOSHIBA)
1008                 ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
1009
1010         return 0;
1011 }
1012
1013 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1014 {
1015         ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1016
1017         if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1018             (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1019                 hba->vreg_info.vcc->always_on = true;
1020                 /*
1021                  * VCC will be kept always-on thus we don't
1022                  * need any delay during regulator operations
1023                  */
1024                 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1025                         UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1026         }
1027 }
1028
/*
 * ufs_mtk_event_notify - forward UFS core events to the MediaTek tracepoint
 * @hba: per-adapter instance (unused)
 * @evt: event type reported by the UFS core
 * @data: pointer to the event's 32-bit payload value
 */
static void ufs_mtk_event_notify(struct ufs_hba *hba,
                                 enum ufs_event_type evt, void *data)
{
        unsigned int val = *(u32 *)data;

        trace_ufs_mtk_event(evt, val);
}
1036
1037 /*
1038  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1039  *
1040  * The variant operations configure the necessary controller and PHY
1041  * handshake during initialization.
1042  */
/* Vendor-ops table registered with the UFS core via ufshcd_pltfrm_init() */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name                = "mediatek.ufshci",
        .init                = ufs_mtk_init,
        .setup_clocks        = ufs_mtk_setup_clocks,
        .hce_enable_notify   = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
        .pwr_change_notify   = ufs_mtk_pwr_change_notify,
        .apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
        .fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
        .suspend             = ufs_mtk_suspend,
        .resume              = ufs_mtk_resume,
        .dbg_register_dump   = ufs_mtk_dbg_register_dump,
        .device_reset        = ufs_mtk_device_reset,
        .event_notify        = ufs_mtk_event_notify,
};
1058
1059 /**
1060  * ufs_mtk_probe - probe routine of the driver
1061  * @pdev: pointer to Platform device handle
1062  *
1063  * Return zero for success and non-zero for failure
1064  */
1065 static int ufs_mtk_probe(struct platform_device *pdev)
1066 {
1067         int err;
1068         struct device *dev = &pdev->dev;
1069
1070         /* perform generic probe */
1071         err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1072         if (err)
1073                 dev_info(dev, "probe failed %d\n", err);
1074
1075         return err;
1076 }
1077
1078 /**
1079  * ufs_mtk_remove - set driver_data of the device to NULL
1080  * @pdev: pointer to platform device handle
1081  *
1082  * Always return 0
1083  */
1084 static int ufs_mtk_remove(struct platform_device *pdev)
1085 {
1086         struct ufs_hba *hba =  platform_get_drvdata(pdev);
1087
1088         pm_runtime_get_sync(&(pdev)->dev);
1089         ufshcd_remove(hba);
1090         return 0;
1091 }
1092
/* System and runtime PM callbacks — all delegated to the UFS platform glue */
static const struct dev_pm_ops ufs_mtk_pm_ops = {
        .suspend         = ufshcd_pltfrm_suspend,
        .resume          = ufshcd_pltfrm_resume,
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
};
1100
/*
 * Platform driver registration.
 * NOTE(review): "pltform" looks like a typo for "platform"; the identifier
 * is file-local so renaming would be safe within this file, but left as-is
 * to keep this a documentation-only change.
 */
static struct platform_driver ufs_mtk_pltform = {
        .probe      = ufs_mtk_probe,
        .remove     = ufs_mtk_remove,
        .shutdown   = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-mtk",
                .pm     = &ufs_mtk_pm_ops,
                .of_match_table = ufs_mtk_of_match,
        },
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);