Merge remote-tracking branch 'torvalds/master' into perf/core
[linux-2.6-microblaze.git] / drivers / scsi / ufs / ufs-mediatek.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *      Stanley Chu <stanley.chu@mediatek.com>
6  *      Peter Wang <peter.wang@mediatek.com>
7  */
8
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/of.h>
12 #include <linux/of_address.h>
13 #include <linux/of_device.h>
14 #include <linux/phy/phy.h>
15 #include <linux/platform_device.h>
16 #include <linux/regulator/consumer.h>
17 #include <linux/reset.h>
18 #include <linux/soc/mediatek/mtk_sip_svc.h>
19
20 #include "ufshcd.h"
21 #include "ufshcd-crypto.h"
22 #include "ufshcd-pltfrm.h"
23 #include "ufs_quirks.h"
24 #include "unipro.h"
25 #include "ufs-mediatek.h"
26
27 #define CREATE_TRACE_POINTS
28 #include "ufs-mediatek-trace.h"
29
30 #define ufs_mtk_smc(cmd, val, res) \
31         arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
32                       cmd, val, 0, 0, 0, 0, 0, &(res))
33
34 #define ufs_mtk_va09_pwr_ctrl(res, on) \
35         ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
36
37 #define ufs_mtk_crypto_ctrl(res, enable) \
38         ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
39
40 #define ufs_mtk_ref_clk_notify(on, res) \
41         ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
42
43 #define ufs_mtk_device_reset_ctrl(high, res) \
44         ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
45
/* Per-vendor/model UFS device quirk table (terminated by END_FIX) */
static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	/* Any Micron device: add delay after entering low-power mode */
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	/* This specific SK hynix part supports extended features */
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};
53
/* Device-tree compatible strings handled by this driver */
static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
58
59 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
60 {
61         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
62
63         return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
64 }
65
66 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
67 {
68         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
69
70         return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
71 }
72
73 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
74 {
75         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
76
77         return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
78 }
79
80 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
81 {
82         u32 tmp;
83
84         if (enable) {
85                 ufshcd_dme_get(hba,
86                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
87                 tmp = tmp |
88                       (1 << RX_SYMBOL_CLK_GATE_EN) |
89                       (1 << SYS_CLK_GATE_EN) |
90                       (1 << TX_CLK_GATE_EN);
91                 ufshcd_dme_set(hba,
92                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
93
94                 ufshcd_dme_get(hba,
95                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
96                 tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
97                 ufshcd_dme_set(hba,
98                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
99         } else {
100                 ufshcd_dme_get(hba,
101                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
102                 tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
103                               (1 << SYS_CLK_GATE_EN) |
104                               (1 << TX_CLK_GATE_EN));
105                 ufshcd_dme_set(hba,
106                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
107
108                 ufshcd_dme_get(hba,
109                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
110                 tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
111                 ufshcd_dme_set(hba,
112                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
113         }
114 }
115
/*
 * Enable the inline crypto engine through the secure-world SMC call.
 * On failure, clear UFSHCD_CAP_CRYPTO so the core stops using crypto.
 */
static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		/* res.a0 is the SMC return code; non-zero means failure */
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}
127
/*
 * Pulse the HCI, crypto and UniPro reset lines.
 * Assert order (hci -> crypto -> unipro) is the reverse of deassert order;
 * keep both sequences as-is — NOTE(review): the ordering looks deliberate,
 * confirm against the SoC reset requirements before changing it.
 */
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	/* Hold resets asserted for at least 100us */
	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}
142
143 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
144                                        struct reset_control **rc,
145                                        char *str)
146 {
147         *rc = devm_reset_control_get(hba->dev, str);
148         if (IS_ERR(*rc)) {
149                 dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
150                          str, PTR_ERR(*rc));
151                 *rc = NULL;
152         }
153 }
154
155 static void ufs_mtk_init_reset(struct ufs_hba *hba)
156 {
157         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
158
159         ufs_mtk_init_reset_control(hba, &host->hci_reset,
160                                    "hci_rst");
161         ufs_mtk_init_reset_control(hba, &host->unipro_reset,
162                                    "unipro_rst");
163         ufs_mtk_init_reset_control(hba, &host->crypto_reset,
164                                    "crypto_rst");
165 }
166
/*
 * Host-controller-enable notification hook.
 *
 * PRE_CHANGE: pick the HCE enable delay (no delay needed if UniPro is
 * already in LPM, otherwise reset the host and use 600us), re-enable the
 * crypto engine if the core still advertises it, and — when auto-hibern8
 * is disabled by platform capability — zero the AH8 timer register and
 * strip the capability bit so the core never re-arms it.
 */
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	unsigned long flags;

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			/* Register write done under host lock */
			spin_lock_irqsave(hba->host->host_lock, flags);
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			spin_unlock_irqrestore(hba->host->host_lock,
					       flags);

			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}
	}

	return 0;
}
198
/*
 * Bind the M-PHY described in the device tree to this host.
 *
 * Returns -EPROBE_DEFER if the PHY driver has not probed yet, 0 when the
 * PHY is bound or genuinely absent (-ENODEV is tolerated because not every
 * platform needs specific mphy control), or a negative error otherwise.
 * On any failure host->mphy is left NULL.
 */
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			__func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}
236
/*
 * Request or release the device reference clock and wait for the
 * controller's acknowledgement.
 *
 * Ungating: notify secure world first, wait the ungating time, then write
 * the request. Gating: write the release first; the notification and the
 * gating wait happen only after the ack arrives. On ack timeout, notify
 * secure world of the (unchanged) current state and return -ETIMEDOUT.
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	/* Already in the requested state: nothing to do */
	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* State did not change; re-notify the current enable state */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		/* Gating path: wait, then tell secure world clk is gated */
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}
283
284 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
285                                           u16 gating_us, u16 ungating_us)
286 {
287         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
288
289         if (hba->dev_info.clk_gating_wait_us) {
290                 host->ref_clk_gating_wait_us =
291                         hba->dev_info.clk_gating_wait_us;
292         } else {
293                 host->ref_clk_gating_wait_us = gating_us;
294         }
295
296         host->ref_clk_ungating_wait_us = ungating_us;
297 }
298
299 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
300                                    unsigned long max_wait_ms)
301 {
302         ktime_t timeout, time_checked;
303         u32 val;
304
305         timeout = ktime_add_ms(ktime_get(), max_wait_ms);
306         do {
307                 time_checked = ktime_get();
308                 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
309                 val = ufshcd_readl(hba, REG_UFS_PROBE);
310                 val = val >> 28;
311
312                 if (val == state)
313                         return 0;
314
315                 /* Sleep for max. 200us */
316                 usleep_range(100, 200);
317         } while (ktime_before(time_checked, timeout));
318
319         if (val == state)
320                 return 0;
321
322         return -ETIMEDOUT;
323 }
324
/*
 * Power the M-PHY on or off, including the VA09 supply when supported.
 *
 * Power-on: enable VA09 regulator, wait for it to stabilize, tell secure
 * world, then power the PHY. Power-off runs the reverse sequence.
 * No-op when there is no bound mphy or the state already matches @on.
 * host->mphy_powered_on tracks the state only on success.
 */
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	/* Skip if no PHY bound or already in the requested state */
	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stablize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}
366
/*
 * Fetch a named clock for @dev. Writes the clock to @clk_out only on
 * success; returns 0 or the devm_clk_get() error code.
 */
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*clk_out = clk;

	return 0;
}
381
/*
 * Boost or relax the crypto-engine clock/voltage operating point.
 *
 * Boost: raise vcore to the configured minimum first, then switch the
 * crypt clock mux to the performance parent (voltage before frequency).
 * Relax: switch the mux to the low-power parent first, then drop vcore
 * (frequency before voltage). The mux clock is enabled around the
 * reparenting and disabled again on exit. Best-effort: failures are
 * logged, not propagated.
 */
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			/* Undo the voltage raise since the boost failed */
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}
437
438 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
439                                  struct clk **clk)
440 {
441         int ret;
442
443         ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
444         if (ret) {
445                 dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
446                          name, ret);
447         }
448
449         return ret;
450 }
451
/*
 * Gather the resources needed for crypto-engine boosting: the dvfsrc-vcore
 * regulator, the "boost-crypt-vcore-min" DT voltage, and the crypt mux/
 * low-power/performance clocks. UFS_MTK_CAP_BOOST_CRYPT_ENGINE is set only
 * if everything is found; any miss leaves the capability off (best effort,
 * no error returned).
 */
static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	/* All resources present: commit the config and enable the cap */
	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}
498
499 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
500 {
501         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
502
503         host->reg_va09 = regulator_get(hba->dev, "va09");
504         if (!host->reg_va09)
505                 dev_info(hba->dev, "failed to get va09");
506         else
507                 host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
508 }
509
510 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
511 {
512         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
513         struct device_node *np = hba->dev->of_node;
514
515         if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
516                 ufs_mtk_init_boost_crypt(hba);
517
518         if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
519                 ufs_mtk_init_va09_pwr_ctrl(hba);
520
521         if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
522                 host->caps |= UFS_MTK_CAP_DISABLE_AH8;
523
524         if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
525                 host->caps |= UFS_MTK_CAP_BROKEN_VCC;
526
527         dev_info(hba->dev, "caps: 0x%x", host->caps);
528 }
529
/*
 * Scale host performance resources up or down: crypto boost, device
 * reference clock, and M-PHY power. The PHY is powered on when scaling
 * up and off when scaling down, after the clock adjustments.
 */
static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_boost_crypt(hba, up);
	ufs_mtk_setup_ref_clk(hba, up);

	if (up)
		phy_power_on(host->mphy);
	else
		phy_power_off(host->mphy);
}
542
543 /**
544  * ufs_mtk_setup_clocks - enables/disable clocks
545  * @hba: host controller instance
546  * @on: If true, enable clocks else disable them.
547  * @status: PRE_CHANGE or POST_CHANGE notify
548  *
549  * Returns 0 on success, non-zero on failure.
550  */
551 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
552                                 enum ufs_notify_change_status status)
553 {
554         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
555         bool clk_pwr_off = false;
556         int ret = 0;
557
558         /*
559          * In case ufs_mtk_init() is not yet done, simply ignore.
560          * This ufs_mtk_setup_clocks() shall be called from
561          * ufs_mtk_init() after init is done.
562          */
563         if (!host)
564                 return 0;
565
566         if (!on && status == PRE_CHANGE) {
567                 if (ufshcd_is_link_off(hba)) {
568                         clk_pwr_off = true;
569                 } else if (ufshcd_is_link_hibern8(hba) ||
570                          (!ufshcd_can_hibern8_during_gating(hba) &&
571                          ufshcd_is_auto_hibern8_enabled(hba))) {
572                         /*
573                          * Gate ref-clk and poweroff mphy if link state is in
574                          * OFF or Hibern8 by either Auto-Hibern8 or
575                          * ufshcd_link_state_transition().
576                          */
577                         ret = ufs_mtk_wait_link_state(hba,
578                                                       VS_LINK_HIBERN8,
579                                                       15);
580                         if (!ret)
581                                 clk_pwr_off = true;
582                 }
583
584                 if (clk_pwr_off)
585                         ufs_mtk_scale_perf(hba, false);
586         } else if (on && status == POST_CHANGE) {
587                 ufs_mtk_scale_perf(hba, true);
588         }
589
590         return ret;
591 }
592
/*
 * Determine the host controller hardware version, once.
 *
 * Defaults hw_ver.major to 2, then bumps it to 3 when the UniPro version
 * read via PA_LOCALVERINFO is >= 1.8, also correcting hba->ufs_version
 * to at least 3.0 for platforms that report it wrongly.
 */
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	/* Already detected on a previous call */
	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}
617
/* Report the (possibly corrected) HCI version stored in hba->ufs_version */
static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}
622
623 /**
624  * ufs_mtk_init - find other essential mmio bases
625  * @hba: host controller instance
626  *
627  * Binds PHY with controller and powers up PHY enabling clocks
628  * and regulators.
629  *
630  * Returns -EPROBE_DEFER if binding fails, returns negative error
631  * on phy power up failure and returns zero on success.
632  */
633 static int ufs_mtk_init(struct ufs_hba *hba)
634 {
635         const struct of_device_id *id;
636         struct device *dev = hba->dev;
637         struct ufs_mtk_host *host;
638         int err = 0;
639
640         host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
641         if (!host) {
642                 err = -ENOMEM;
643                 dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
644                 goto out;
645         }
646
647         host->hba = hba;
648         ufshcd_set_variant(hba, host);
649
650         id = of_match_device(ufs_mtk_of_match, dev);
651         if (!id) {
652                 err = -EINVAL;
653                 goto out;
654         }
655
656         /* Initialize host capability */
657         ufs_mtk_init_host_caps(hba);
658
659         err = ufs_mtk_bind_mphy(hba);
660         if (err)
661                 goto out_variant_clear;
662
663         ufs_mtk_init_reset(hba);
664
665         /* Enable runtime autosuspend */
666         hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
667
668         /* Enable clock-gating */
669         hba->caps |= UFSHCD_CAP_CLK_GATING;
670
671         /* Enable inline encryption */
672         hba->caps |= UFSHCD_CAP_CRYPTO;
673
674         /* Enable WriteBooster */
675         hba->caps |= UFSHCD_CAP_WB_EN;
676         hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
677         hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
678
679         if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
680                 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
681
682         /*
683          * ufshcd_vops_init() is invoked after
684          * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
685          * phy clock setup is skipped.
686          *
687          * Enable phy clocks specifically here.
688          */
689         ufs_mtk_mphy_power_on(hba, true);
690         ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
691
692         goto out;
693
694 out_variant_clear:
695         ufshcd_set_variant(hba, NULL);
696 out:
697         return err;
698 }
699
700 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
701                                   struct ufs_pa_layer_attr *dev_max_params,
702                                   struct ufs_pa_layer_attr *dev_req_params)
703 {
704         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
705         struct ufs_dev_params host_cap;
706         int ret;
707
708         ufshcd_init_pwr_dev_param(&host_cap);
709         host_cap.hs_rx_gear = UFS_HS_G4;
710         host_cap.hs_tx_gear = UFS_HS_G4;
711
712         ret = ufshcd_get_pwr_dev_param(&host_cap,
713                                        dev_max_params,
714                                        dev_req_params);
715         if (ret) {
716                 pr_info("%s: failed to determine capabilities\n",
717                         __func__);
718         }
719
720         if (host->hw_ver.major >= 3) {
721                 ret = ufshcd_dme_configure_adapt(hba,
722                                            dev_req_params->gear_tx,
723                                            PA_INITIAL_ADAPT);
724         }
725
726         return ret;
727 }
728
729 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
730                                      enum ufs_notify_change_status stage,
731                                      struct ufs_pa_layer_attr *dev_max_params,
732                                      struct ufs_pa_layer_attr *dev_req_params)
733 {
734         int ret = 0;
735
736         switch (stage) {
737         case PRE_CHANGE:
738                 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
739                                              dev_req_params);
740                 break;
741         case POST_CHANGE:
742                 break;
743         default:
744                 ret = -EINVAL;
745                 break;
746         }
747
748         return ret;
749 }
750
/*
 * Enter or leave UniPro low-power mode via VS_UNIPROPOWERDOWNCONTROL.
 * host->unipro_lpm mirrors the requested state; on a failed UIC command
 * it is only updated when leaving LPM (see comment below).
 */
static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly record non-LPM mode if the UIC command failed,
		 * so that the default hba_enable_delay_us is used when
		 * re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}
770
/*
 * Pre-link-startup setup: detect the controller version, make sure
 * UniPro is out of low-power mode, disable host TX LCC, and clear the
 * deep-stall bit in VS_SAVEPOWERCONTROL.
 */
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	/* bit 6 is the deep-stall enable in VS_SAVEPOWERCONTROL */
	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}
802
803 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
804 {
805         unsigned long flags;
806         u32 ah_ms;
807
808         if (ufshcd_is_clkgating_allowed(hba)) {
809                 if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
810                         ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
811                                           hba->ahit);
812                 else
813                         ah_ms = 10;
814                 spin_lock_irqsave(hba->host->host_lock, flags);
815                 hba->clk_gating.delay_ms = ah_ms + 5;
816                 spin_unlock_irqrestore(hba->host->host_lock, flags);
817         }
818 }
819
/*
 * Post-link-startup setup: enable UniPro clock gating, program a 10 ms
 * auto-hibern8 timer (scale field = 3) when supported, and derive the
 * clock-gating delay from it.
 */
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* configure auto-hibern8 timer to 10ms */
	if (ufshcd_is_auto_hibern8_supported(hba)) {
		ufshcd_auto_hibern8_update(hba,
			FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
	}

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}
836
837 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
838                                        enum ufs_notify_change_status stage)
839 {
840         int ret = 0;
841
842         switch (stage) {
843         case PRE_CHANGE:
844                 ret = ufs_mtk_pre_link(hba);
845                 break;
846         case POST_CHANGE:
847                 ret = ufs_mtk_post_link(hba);
848                 break;
849         default:
850                 ret = -EINVAL;
851                 break;
852         }
853
854         return ret;
855 }
856
/*
 * Toggle the device RST_n line through the secure-world SMC: drive it low,
 * wait, release it, then give the device time to come back.
 */
static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}
881
882 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
883 {
884         int err;
885
886         err = ufshcd_hba_enable(hba);
887         if (err)
888                 return err;
889
890         err = ufs_mtk_unipro_set_lpm(hba, false);
891         if (err)
892                 return err;
893
894         err = ufshcd_uic_hibern8_exit(hba);
895         if (!err)
896                 ufshcd_set_link_active(hba);
897         else
898                 return err;
899
900         err = ufshcd_make_hba_operational(hba);
901         if (err)
902                 return err;
903
904         return 0;
905 }
906
907 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
908 {
909         int err;
910
911         err = ufs_mtk_unipro_set_lpm(hba, true);
912         if (err) {
913                 /* Resume UniPro state for following error recovery */
914                 ufs_mtk_unipro_set_lpm(hba, false);
915                 return err;
916         }
917
918         return 0;
919 }
920
/*
 * Switch VCCQ2 between idle (low-power) and normal regulator modes.
 * Idle mode is entered only when VCC is already disabled; no-op when
 * either regulator is absent.
 */
static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
		return;

	if (lpm && !hba->vreg_info.vcc->enabled)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_IDLE);
	else if (!lpm)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_NORMAL);
}
933
/*
 * ufs_mtk_suspend - vendor suspend hook
 * @hba: per-adapter instance
 * @pm_op: runtime vs. system PM operation (not used here)
 *
 * Sequence: put UniPro into LPM (if the link is in hibern8), then drop
 * the regulators and M-PHY into low power, and finally assert device
 * reset if the link is fully off.  On any failure the link state is
 * forced to "off" and -EAGAIN is returned so that generic code performs
 * a full host reset-and-restore.
 */
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	/* Link fully off: hold the device in reset via the SiP SMC */
	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	return 0;
fail:
	/*
	 * Set link as off state enforcedly to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for completed host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}
970
971 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
972 {
973         int err;
974
975         err = ufs_mtk_mphy_power_on(hba, true);
976         if (err)
977                 goto fail;
978
979         ufs_mtk_vreg_set_lpm(hba, false);
980
981         if (ufshcd_is_link_hibern8(hba)) {
982                 err = ufs_mtk_link_set_hpm(hba);
983                 if (err)
984                         goto fail;
985         }
986
987         return 0;
988 fail:
989         return ufshcd_link_recovery(hba);
990 }
991
/*
 * ufs_mtk_dbg_register_dump - dump vendor-specific debug registers
 * @hba: per-adapter instance
 *
 * Dumps the reference-clock control, extended and M-PHY control
 * register windows, then selects debug source 0x20 and dumps the
 * probe register.
 */
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* One contiguous window from MPHYCTRL up to and including REJECT_MON */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
1006
1007 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1008 {
1009         struct ufs_dev_info *dev_info = &hba->dev_info;
1010         u16 mid = dev_info->wmanufacturerid;
1011
1012         if (mid == UFS_VENDOR_SAMSUNG)
1013                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1014
1015         /*
1016          * Decide waiting time before gating reference clock and
1017          * after ungating reference clock according to vendors'
1018          * requirements.
1019          */
1020         if (mid == UFS_VENDOR_SAMSUNG)
1021                 ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
1022         else if (mid == UFS_VENDOR_SKHYNIX)
1023                 ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
1024         else if (mid == UFS_VENDOR_TOSHIBA)
1025                 ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
1026
1027         return 0;
1028 }
1029
1030 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1031 {
1032         ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1033
1034         if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1035             (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1036                 hba->vreg_info.vcc->always_on = true;
1037                 /*
1038                  * VCC will be kept always-on thus we don't
1039                  * need any delay during regulator operations
1040                  */
1041                 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1042                         UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1043         }
1044 }
1045
1046 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1047                                  enum ufs_event_type evt, void *data)
1048 {
1049         unsigned int val = *(u32 *)data;
1050
1051         trace_ufs_mtk_event(evt, val);
1052 }
1053
/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization, and hook into suspend/resume,
 * quirk handling, device reset and debug dumping.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
};
1076
1077 /**
1078  * ufs_mtk_probe - probe routine of the driver
1079  * @pdev: pointer to Platform device handle
1080  *
1081  * Return zero for success and non-zero for failure
1082  */
1083 static int ufs_mtk_probe(struct platform_device *pdev)
1084 {
1085         int err;
1086         struct device *dev = &pdev->dev;
1087
1088         /* perform generic probe */
1089         err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1090         if (err)
1091                 dev_info(dev, "probe failed %d\n", err);
1092
1093         return err;
1094 }
1095
1096 /**
1097  * ufs_mtk_remove - set driver_data of the device to NULL
1098  * @pdev: pointer to platform device handle
1099  *
1100  * Always return 0
1101  */
1102 static int ufs_mtk_remove(struct platform_device *pdev)
1103 {
1104         struct ufs_hba *hba =  platform_get_drvdata(pdev);
1105
1106         pm_runtime_get_sync(&(pdev)->dev);
1107         ufshcd_remove(hba);
1108         return 0;
1109 }
1110
/* System and runtime PM callbacks, all delegated to the UFS platform glue */
static const struct dev_pm_ops ufs_mtk_pm_ops = {
	.suspend         = ufshcd_pltfrm_suspend,
	.resume          = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};
1118
/* Platform driver glue; matched against "mediatek,mt8183-ufshci" nodes */
static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};
1129
MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

/* Registers ufs_mtk_pltform and generates module init/exit boilerplate */
module_platform_driver(ufs_mtk_pltform);