Merge tag 'batadv-net-pullrequest-20210608' of git://git.open-mesh.org/linux-merge
[linux-2.6-microblaze.git] / drivers / scsi / ufs / ufs-mediatek.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *      Stanley Chu <stanley.chu@mediatek.com>
6  *      Peter Wang <peter.wang@mediatek.com>
7  */
8
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/of.h>
12 #include <linux/of_address.h>
13 #include <linux/of_device.h>
14 #include <linux/phy/phy.h>
15 #include <linux/platform_device.h>
16 #include <linux/regulator/consumer.h>
17 #include <linux/reset.h>
18 #include <linux/soc/mediatek/mtk_sip_svc.h>
19
20 #include "ufshcd.h"
21 #include "ufshcd-crypto.h"
22 #include "ufshcd-pltfrm.h"
23 #include "ufs_quirks.h"
24 #include "unipro.h"
25 #include "ufs-mediatek.h"
26
27 #define CREATE_TRACE_POINTS
28 #include "ufs-mediatek-trace.h"
29
/*
 * ufs_mtk_smc - issue a MediaTek SiP (Silicon Provider) SMC call for UFS
 * @cmd: UFS_MTK_SIP_* sub-command
 * @val: sub-command argument
 * @res: struct arm_smccc_res receiving the result (res.a0 == 0 on success)
 */
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

/* Power the VA09 rail on (1) / off (0) via secure firmware */
#define ufs_mtk_va09_pwr_ctrl(res, on) \
	ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)

/* Enable (1) / disable (0) the inline crypto engine via secure firmware */
#define ufs_mtk_crypto_ctrl(res, enable) \
	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)

/* Notify secure firmware that the reference clock is being (un)gated */
#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

/* Drive the device reset line: high = deassert, low (0) = assert */
#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
/* Per-device quirks applied after device identification */
static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	/* Micron parts need an extra delay after entering low-power mode */
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	/* This SK hynix part supports the extended-features descriptor */
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};
53
/* Device-tree match table; terminated by the empty sentinel entry */
static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
58
59 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
60 {
61         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
62
63         return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
64 }
65
66 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
67 {
68         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
69
70         return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
71 }
72
73 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
74 {
75         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
76
77         return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
78 }
79
80 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
81 {
82         u32 tmp;
83
84         if (enable) {
85                 ufshcd_dme_get(hba,
86                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
87                 tmp = tmp |
88                       (1 << RX_SYMBOL_CLK_GATE_EN) |
89                       (1 << SYS_CLK_GATE_EN) |
90                       (1 << TX_CLK_GATE_EN);
91                 ufshcd_dme_set(hba,
92                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
93
94                 ufshcd_dme_get(hba,
95                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
96                 tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
97                 ufshcd_dme_set(hba,
98                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
99         } else {
100                 ufshcd_dme_get(hba,
101                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
102                 tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
103                               (1 << SYS_CLK_GATE_EN) |
104                               (1 << TX_CLK_GATE_EN));
105                 ufshcd_dme_set(hba,
106                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
107
108                 ufshcd_dme_get(hba,
109                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
110                 tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
111                 ufshcd_dme_set(hba,
112                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
113         }
114 }
115
116 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
117 {
118         struct arm_smccc_res res;
119
120         ufs_mtk_crypto_ctrl(res, 1);
121         if (res.a0) {
122                 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
123                          __func__, res.a0);
124                 hba->caps &= ~UFSHCD_CAP_CRYPTO;
125         }
126 }
127
/*
 * ufs_mtk_host_reset - pulse all host-side reset lines
 * @hba: host controller instance
 *
 * Asserts the HCI, crypto and UniPro resets, holds them for at least
 * 100 us, then releases them in the reverse order of assertion.
 * NULL reset controls (not provided by DT) are tolerated by the
 * reset_control_* API.
 */
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	/* Hold resets asserted for at least 100 us */
	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}
142
143 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
144                                        struct reset_control **rc,
145                                        char *str)
146 {
147         *rc = devm_reset_control_get(hba->dev, str);
148         if (IS_ERR(*rc)) {
149                 dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
150                          str, PTR_ERR(*rc));
151                 *rc = NULL;
152         }
153 }
154
/* Fetch the optional HCI/UniPro/crypto reset lines from the device tree */
static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}
166
/*
 * ufs_mtk_hce_enable_notify - host controller enable callback
 * @hba: host controller instance
 * @status: PRE_CHANGE or POST_CHANGE
 *
 * Before the controller is enabled: perform a full host reset unless
 * UniPro stayed in low-power mode (then no extra enable delay is
 * needed), re-enable the inline crypto engine (its firmware state is
 * lost across reset), and optionally strip Auto-Hibern8 support.
 * Always returns 0.
 */
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	unsigned long flags;

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			/* UniPro kept alive: controller re-enables quickly */
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			/* Zero the AH8 timer and hide the capability bit */
			spin_lock_irqsave(hba->host->host_lock, flags);
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			spin_unlock_irqrestore(hba->host->host_lock,
					       flags);

			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}
	}

	return 0;
}
198
199 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
200 {
201         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
202         struct device *dev = hba->dev;
203         struct device_node *np = dev->of_node;
204         int err = 0;
205
206         host->mphy = devm_of_phy_get_by_index(dev, np, 0);
207
208         if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
209                 /*
210                  * UFS driver might be probed before the phy driver does.
211                  * In that case we would like to return EPROBE_DEFER code.
212                  */
213                 err = -EPROBE_DEFER;
214                 dev_info(dev,
215                          "%s: required phy hasn't probed yet. err = %d\n",
216                         __func__, err);
217         } else if (IS_ERR(host->mphy)) {
218                 err = PTR_ERR(host->mphy);
219                 if (err != -ENODEV) {
220                         dev_info(dev, "%s: PHY get failed %d\n", __func__,
221                                  err);
222                 }
223         }
224
225         if (err)
226                 host->mphy = NULL;
227         /*
228          * Allow unbound mphy because not every platform needs specific
229          * mphy control.
230          */
231         if (err == -ENODEV)
232                 err = 0;
233
234         return err;
235 }
236
/*
 * ufs_mtk_setup_ref_clk - request or release the device reference clock
 * @hba: host controller instance
 * @on: true to ungate the reference clock, false to gate it
 *
 * Performs the request/ack handshake through REG_UFS_REFCLK_CTRL and
 * keeps secure firmware informed of the state. Returns 0 on success or
 * -ETIMEDOUT when the controller never acknowledges the request.
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	/* Already in the requested state */
	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		/* Notify firmware first, then wait before raising the request */
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* Roll back: tell firmware the state did not actually change */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		/* Gated successfully: wait, then notify firmware */
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}
283
284 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
285                                           u16 gating_us, u16 ungating_us)
286 {
287         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
288
289         if (hba->dev_info.clk_gating_wait_us) {
290                 host->ref_clk_gating_wait_us =
291                         hba->dev_info.clk_gating_wait_us;
292         } else {
293                 host->ref_clk_gating_wait_us = gating_us;
294         }
295
296         host->ref_clk_ungating_wait_us = ungating_us;
297 }
298
299 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
300                                    unsigned long max_wait_ms)
301 {
302         ktime_t timeout, time_checked;
303         u32 val;
304
305         timeout = ktime_add_ms(ktime_get(), max_wait_ms);
306         do {
307                 time_checked = ktime_get();
308                 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
309                 val = ufshcd_readl(hba, REG_UFS_PROBE);
310                 val = val >> 28;
311
312                 if (val == state)
313                         return 0;
314
315                 /* Sleep for max. 200us */
316                 usleep_range(100, 200);
317         } while (ktime_before(time_checked, timeout));
318
319         if (val == state)
320                 return 0;
321
322         return -ETIMEDOUT;
323 }
324
/*
 * ufs_mtk_mphy_power_on - power the bound M-PHY (and VA09 rail) on or off
 * @hba: host controller instance
 * @on: target power state
 *
 * No-op when no M-PHY is bound or it is already in the requested state.
 * Power-up enables VA09 (when supported) and notifies firmware before
 * the PHY comes up; power-down reverses that order. host->mphy_powered_on
 * tracks the state only when the transition succeeded.
 */
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	/* XOR: skip when already in the requested state */
	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stablize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}
366
/*
 * ufs_mtk_get_host_clk - look up a named clock for this host
 * @dev: host device
 * @name: clock name in the device tree
 * @clk_out: set on success; untouched on failure
 *
 * Returns 0 on success or the devm_clk_get() error code.
 */
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*clk_out = clk;

	return 0;
}
381
/*
 * ufs_mtk_boost_crypt - scale crypto-engine clock and vcore together
 * @hba: host controller instance
 * @boost: true to reparent to the performance clock and raise vcore,
 *         false to reparent to the low-power clock and relax vcore
 *
 * No-op unless UFS_MTK_CAP_BOOST_CRYPT_ENGINE was set at init. Order
 * matters: boosting raises vcore before reparenting; unboosting
 * reparents before lowering vcore. All failures are best-effort
 * (logged, not propagated).
 */
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	/* The mux must be running while its parent is switched */
	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			/* Undo the vcore raise on reparent failure */
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}
437
438 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
439                                  struct clk **clk)
440 {
441         int ret;
442
443         ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
444         if (ret) {
445                 dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
446                          name, ret);
447         }
448
449         return ret;
450 }
451
/*
 * ufs_mtk_init_boost_crypt - probe resources for crypto-engine boosting
 * @hba: host controller instance
 *
 * Gathers the dvfsrc-vcore regulator, the minimum boost voltage from
 * DT, and the crypt mux/lp/perf clocks. UFS_MTK_CAP_BOOST_CRYPT_ENGINE
 * is set only when everything is available; any failure silently leaves
 * the capability disabled (best-effort, no error propagated).
 */
static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}
498
499 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
500 {
501         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
502
503         host->reg_va09 = regulator_get(hba->dev, "va09");
504         if (!host->reg_va09)
505                 dev_info(hba->dev, "failed to get va09");
506         else
507                 host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
508 }
509
/*
 * ufs_mtk_init_host_caps - derive platform capabilities from device tree
 * @hba: host controller instance
 *
 * Each optional DT boolean enables one host capability; the final caps
 * mask is logged for debugging.
 */
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}
529
/*
 * ufs_mtk_scale_perf - scale platform resources with link activity
 * @hba: host controller instance
 * @up: true when clocks come up, false when they go down
 *
 * Adjusts the crypto-engine boost, the device reference clock, and the
 * M-PHY power state together.
 */
static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_boost_crypt(hba, up);
	ufs_mtk_setup_ref_clk(hba, up);

	if (up)
		phy_power_on(host->mphy);
	else
		phy_power_off(host->mphy);
}
542
/**
 * ufs_mtk_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Scales platform resources down just before clocks are gated (only
 * when the link is OFF or in Hibern8) and scales them back up after
 * clocks are running again.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_scale_perf(hba, false);
	} else if (on && status == POST_CHANGE) {
		/* Clocks are running again: scale everything back up */
		ufs_mtk_scale_perf(hba, true);
	}

	return ret;
}
592
593 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
594 {
595         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
596         int ret, ver = 0;
597
598         if (host->hw_ver.major)
599                 return;
600
601         /* Set default (minimum) version anyway */
602         host->hw_ver.major = 2;
603
604         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
605         if (!ret) {
606                 if (ver >= UFS_UNIPRO_VER_1_8)
607                         host->hw_ver.major = 3;
608         }
609 }
610
/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/* Sanity check: we must be probed against a matching compatible */
	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
687
/*
 * ufs_mtk_pre_pwr_change - negotiate power-mode parameters (PRE_CHANGE)
 * @hba: host controller instance
 * @dev_max_params: device-reported maximum capabilities
 * @dev_req_params: negotiated parameters (output)
 *
 * Caps the host at HS-G4 and lets the core pick the common subset.
 * On hw major version >= 3, additionally configures PA_INITIAL_ADAPT.
 *
 * NOTE(review): when ufshcd_get_pwr_dev_param() fails the function only
 * logs and continues, and 'ret' may be overwritten by the ADAPT call —
 * presumably intentional best-effort; confirm before restructuring.
 */
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G4;
	host_cap.hs_tx_gear = UFS_HS_G4;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
					   dev_req_params->gear_tx,
					   PA_INITIAL_ADAPT);
	}

	return ret;
}
716
717 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
718                                      enum ufs_notify_change_status stage,
719                                      struct ufs_pa_layer_attr *dev_max_params,
720                                      struct ufs_pa_layer_attr *dev_req_params)
721 {
722         int ret = 0;
723
724         switch (stage) {
725         case PRE_CHANGE:
726                 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
727                                              dev_req_params);
728                 break;
729         case POST_CHANGE:
730                 break;
731         default:
732                 ret = -EINVAL;
733                 break;
734         }
735
736         return ret;
737 }
738
/*
 * ufs_mtk_unipro_set_lpm - enter or leave UniPro low-power mode
 * @hba: host controller instance
 * @lpm: true to power down UniPro, false to power it up
 *
 * Returns the DME set result. host->unipro_lpm is updated when the
 * command succeeded, and also whenever leaving LPM even on failure
 * (see comment below).
 */
static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly set as non-LPM mode if UIC commands is failed
		 * to use default hba_enable_delay_us value for re-enabling
		 * the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}
758
/*
 * ufs_mtk_pre_link - host-specific setup before link startup
 * @hba: host controller instance
 *
 * Detects the controller version, leaves UniPro low-power mode,
 * disables host TX LCC, and clears the deep-stall control bit in
 * VS_SAVEPOWERCONTROL. Returns 0 or the first failing step's error.
 */
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	/* bit 6 = deep-stall enable (vendor attribute); clear it */
	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}
790
791 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
792 {
793         unsigned long flags;
794         u32 ah_ms;
795
796         if (ufshcd_is_clkgating_allowed(hba)) {
797                 if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
798                         ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
799                                           hba->ahit);
800                 else
801                         ah_ms = 10;
802                 spin_lock_irqsave(hba->host->host_lock, flags);
803                 hba->clk_gating.delay_ms = ah_ms + 5;
804                 spin_unlock_irqrestore(hba->host->host_lock, flags);
805         }
806 }
807
/*
 * ufs_mtk_post_link - host-specific setup after link startup
 * @hba: host controller instance
 *
 * Enables UniPro clock gating, programs the Auto-Hibern8 timer and
 * derives the clock-gating delay from it. Always returns 0.
 */
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* configure auto-hibern8 timer to 10ms (timer=10, scale=3) */
	if (ufshcd_is_auto_hibern8_supported(hba)) {
		ufshcd_auto_hibern8_update(hba,
			FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
	}

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}
824
825 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
826                                        enum ufs_notify_change_status stage)
827 {
828         int ret = 0;
829
830         switch (stage) {
831         case PRE_CHANGE:
832                 ret = ufs_mtk_pre_link(hba);
833                 break;
834         case POST_CHANGE:
835                 ret = ufs_mtk_post_link(hba);
836                 break;
837         default:
838                 ret = -EINVAL;
839                 break;
840         }
841
842         return ret;
843 }
844
/*
 * ufs_mtk_device_reset - pulse the device RST_n line via secure firmware
 * @hba: host controller instance
 *
 * Always returns 0.
 */
static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* Assert reset (RST_n low) */
	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	/* Deassert reset (RST_n high) */
	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}
869
870 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
871 {
872         int err;
873
874         err = ufshcd_hba_enable(hba);
875         if (err)
876                 return err;
877
878         err = ufs_mtk_unipro_set_lpm(hba, false);
879         if (err)
880                 return err;
881
882         err = ufshcd_uic_hibern8_exit(hba);
883         if (!err)
884                 ufshcd_set_link_active(hba);
885         else
886                 return err;
887
888         err = ufshcd_make_hba_operational(hba);
889         if (err)
890                 return err;
891
892         return 0;
893 }
894
895 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
896 {
897         int err;
898
899         err = ufs_mtk_unipro_set_lpm(hba, true);
900         if (err) {
901                 /* Resume UniPro state for following error recovery */
902                 ufs_mtk_unipro_set_lpm(hba, false);
903                 return err;
904         }
905
906         return 0;
907 }
908
909 static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
910 {
911         if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
912                 return;
913
914         if (lpm && !hba->vreg_info.vcc->enabled)
915                 regulator_set_mode(hba->vreg_info.vccq2->reg,
916                                    REGULATOR_MODE_IDLE);
917         else if (!lpm)
918                 regulator_set_mode(hba->vreg_info.vccq2->reg,
919                                    REGULATOR_MODE_NORMAL);
920 }
921
/*
 * ufs_mtk_suspend - variant suspend hook
 * @hba: host controller instance
 * @pm_op: runtime vs system PM operation (unused here)
 *
 * Powers down the link-side resources appropriate for the current link
 * state. Any failure forces the link OFF and returns -EAGAIN so the
 * core performs a full host reset on the next resume.
 */
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	/* Link fully off: hold the device in reset */
	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	return 0;
fail:
	/*
	 * Set link as off state enforcedly to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for completed host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}
958
959 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
960 {
961         int err;
962
963         err = ufs_mtk_mphy_power_on(hba, true);
964         if (err)
965                 goto fail;
966
967         ufs_mtk_vreg_set_lpm(hba, false);
968
969         if (ufshcd_is_link_hibern8(hba)) {
970                 err = ufs_mtk_link_set_hpm(hba);
971                 if (err)
972                         goto fail;
973         }
974
975         return 0;
976 fail:
977         return ufshcd_link_recovery(hba);
978 }
979
/* Dump MediaTek vendor-specific registers for post-mortem debugging. */
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump the contiguous range from MPHYCTRL through REJECT_MON. */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
994
995 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
996 {
997         struct ufs_dev_info *dev_info = &hba->dev_info;
998         u16 mid = dev_info->wmanufacturerid;
999
1000         if (mid == UFS_VENDOR_SAMSUNG)
1001                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1002
1003         /*
1004          * Decide waiting time before gating reference clock and
1005          * after ungating reference clock according to vendors'
1006          * requirements.
1007          */
1008         if (mid == UFS_VENDOR_SAMSUNG)
1009                 ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
1010         else if (mid == UFS_VENDOR_SKHYNIX)
1011                 ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
1012         else if (mid == UFS_VENDOR_TOSHIBA)
1013                 ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
1014
1015         return 0;
1016 }
1017
1018 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1019 {
1020         ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1021
1022         if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1023             (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1024                 hba->vreg_info.vcc->always_on = true;
1025                 /*
1026                  * VCC will be kept always-on thus we don't
1027                  * need any delay during regulator operations
1028                  */
1029                 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1030                         UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1031         }
1032 }
1033
1034 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1035                                  enum ufs_event_type evt, void *data)
1036 {
1037         unsigned int val = *(u32 *)data;
1038
1039         trace_ufs_mtk_event(evt, val);
1040 }
1041
1042 /*
1043  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1044  *
1045  * The variant operations configure the necessary controller and PHY
1046  * handshake during initialization.
1047  */
1048 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
1049         .name                = "mediatek.ufshci",
1050         .init                = ufs_mtk_init,
1051         .setup_clocks        = ufs_mtk_setup_clocks,
1052         .hce_enable_notify   = ufs_mtk_hce_enable_notify,
1053         .link_startup_notify = ufs_mtk_link_startup_notify,
1054         .pwr_change_notify   = ufs_mtk_pwr_change_notify,
1055         .apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
1056         .fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
1057         .suspend             = ufs_mtk_suspend,
1058         .resume              = ufs_mtk_resume,
1059         .dbg_register_dump   = ufs_mtk_dbg_register_dump,
1060         .device_reset        = ufs_mtk_device_reset,
1061         .event_notify        = ufs_mtk_event_notify,
1062 };
1063
1064 /**
1065  * ufs_mtk_probe - probe routine of the driver
1066  * @pdev: pointer to Platform device handle
1067  *
1068  * Return zero for success and non-zero for failure
1069  */
1070 static int ufs_mtk_probe(struct platform_device *pdev)
1071 {
1072         int err;
1073         struct device *dev = &pdev->dev;
1074
1075         /* perform generic probe */
1076         err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1077         if (err)
1078                 dev_info(dev, "probe failed %d\n", err);
1079
1080         return err;
1081 }
1082
1083 /**
1084  * ufs_mtk_remove - set driver_data of the device to NULL
1085  * @pdev: pointer to platform device handle
1086  *
1087  * Always return 0
1088  */
1089 static int ufs_mtk_remove(struct platform_device *pdev)
1090 {
1091         struct ufs_hba *hba =  platform_get_drvdata(pdev);
1092
1093         pm_runtime_get_sync(&(pdev)->dev);
1094         ufshcd_remove(hba);
1095         return 0;
1096 }
1097
/* System and runtime PM callbacks, all delegated to the pltfrm helpers. */
static const struct dev_pm_ops ufs_mtk_pm_ops = {
	.suspend         = ufshcd_pltfrm_suspend,
	.resume          = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};
1105
/* Platform driver glue; matched via the ufs_mtk_of_match table above. */
static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};
1116
/* Module metadata and registration. */
MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);