Merge tag 'samsung-soc-5.10' of https://git.kernel.org/pub/scm/linux/kernel/git/krzk...
[linux-2.6-microblaze.git] / drivers / scsi / ufs / ufs-mediatek.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *      Stanley Chu <stanley.chu@mediatek.com>
6  *      Peter Wang <peter.wang@mediatek.com>
7  */
8
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/of.h>
12 #include <linux/of_address.h>
13 #include <linux/phy/phy.h>
14 #include <linux/platform_device.h>
15 #include <linux/regulator/consumer.h>
16 #include <linux/soc/mediatek/mtk_sip_svc.h>
17
18 #include "ufshcd.h"
19 #include "ufshcd-crypto.h"
20 #include "ufshcd-pltfrm.h"
21 #include "ufs_quirks.h"
22 #include "unipro.h"
23 #include "ufs-mediatek.h"
24
/*
 * Wrappers around MediaTek SiP (Silicon Provider) secure-monitor calls.
 * ufs_mtk_smc() issues an SMC with the UFS control function ID, one
 * sub-command and one value; the firmware's answer lands in @res
 * (a struct arm_smccc_res, checked via res.a0 by callers).
 */
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

/* Ask secure firmware to enable (1) or disable (0) the inline crypto engine. */
#define ufs_mtk_crypto_ctrl(res, enable) \
	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)

/* Tell secure firmware the device reference clock is being (un)gated. */
#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

/* Drive the device reset (RST_n) line high or low through secure firmware. */
#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
/*
 * Per-device quirk table, applied by ufs_mtk_fixup_dev_quirks() via
 * ufshcd_fixup_dev_quirks(): any Micron part gets the DELAY_AFTER_LPM
 * quirk; one specific SK hynix part gets SUPPORT_EXTENDED_FEATURES.
 */
static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};
45
46 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
47 {
48         u32 tmp;
49
50         if (enable) {
51                 ufshcd_dme_get(hba,
52                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
53                 tmp = tmp |
54                       (1 << RX_SYMBOL_CLK_GATE_EN) |
55                       (1 << SYS_CLK_GATE_EN) |
56                       (1 << TX_CLK_GATE_EN);
57                 ufshcd_dme_set(hba,
58                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
59
60                 ufshcd_dme_get(hba,
61                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
62                 tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
63                 ufshcd_dme_set(hba,
64                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
65         } else {
66                 ufshcd_dme_get(hba,
67                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
68                 tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
69                               (1 << SYS_CLK_GATE_EN) |
70                               (1 << TX_CLK_GATE_EN));
71                 ufshcd_dme_set(hba,
72                                UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
73
74                 ufshcd_dme_get(hba,
75                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
76                 tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
77                 ufshcd_dme_set(hba,
78                                UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
79         }
80 }
81
82 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
83 {
84         struct arm_smccc_res res;
85
86         ufs_mtk_crypto_ctrl(res, 1);
87         if (res.a0) {
88                 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
89                          __func__, res.a0);
90                 hba->caps &= ~UFSHCD_CAP_CRYPTO;
91         }
92 }
93
94 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
95                                      enum ufs_notify_change_status status)
96 {
97         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
98
99         if (status == PRE_CHANGE) {
100                 if (host->unipro_lpm)
101                         hba->vps->hba_enable_delay_us = 0;
102                 else
103                         hba->vps->hba_enable_delay_us = 600;
104
105                 if (hba->caps & UFSHCD_CAP_CRYPTO)
106                         ufs_mtk_crypto_enable(hba);
107         }
108
109         return 0;
110 }
111
112 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
113 {
114         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
115         struct device *dev = hba->dev;
116         struct device_node *np = dev->of_node;
117         int err = 0;
118
119         host->mphy = devm_of_phy_get_by_index(dev, np, 0);
120
121         if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
122                 /*
123                  * UFS driver might be probed before the phy driver does.
124                  * In that case we would like to return EPROBE_DEFER code.
125                  */
126                 err = -EPROBE_DEFER;
127                 dev_info(dev,
128                          "%s: required phy hasn't probed yet. err = %d\n",
129                         __func__, err);
130         } else if (IS_ERR(host->mphy)) {
131                 err = PTR_ERR(host->mphy);
132                 dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
133         }
134
135         if (err)
136                 host->mphy = NULL;
137         /*
138          * Allow unbound mphy because not every platform needs specific
139          * mphy control.
140          */
141         if (err == -ENODEV)
142                 err = 0;
143
144         return err;
145 }
146
/*
 * Request or release the device reference clock.
 *
 * Handshakes with the controller through REG_UFS_REFCLK_CTRL (request bit
 * and ack bit) and informs secure firmware of the state change via
 * ufs_mtk_ref_clk_notify(). The per-vendor (un)gating wait times set by
 * ufs_mtk_setup_ref_clk_wait_us() are honoured around the transitions.
 *
 * Returns 0 on success (or if already in the requested state), -ETIMEDOUT
 * if the controller never acks within REFCLK_REQ_TIMEOUT_US.
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	/* Nothing to do when the ref-clk is already in the requested state. */
	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		/* Notify firmware, wait the ungating time, then request. */
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* Roll the firmware notification back to the unchanged state. */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		/* Gating: wait the vendor-required time, then tell firmware. */
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}
193
194 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
195                                           u16 gating_us, u16 ungating_us)
196 {
197         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
198
199         if (hba->dev_info.clk_gating_wait_us) {
200                 host->ref_clk_gating_wait_us =
201                         hba->dev_info.clk_gating_wait_us;
202         } else {
203                 host->ref_clk_gating_wait_us = gating_us;
204         }
205
206         host->ref_clk_ungating_wait_us = ungating_us;
207 }
208
209 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
210                                    unsigned long max_wait_ms)
211 {
212         ktime_t timeout, time_checked;
213         u32 val;
214
215         timeout = ktime_add_ms(ktime_get(), max_wait_ms);
216         do {
217                 time_checked = ktime_get();
218                 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
219                 val = ufshcd_readl(hba, REG_UFS_PROBE);
220                 val = val >> 28;
221
222                 if (val == state)
223                         return 0;
224
225                 /* Sleep for max. 200us */
226                 usleep_range(100, 200);
227         } while (ktime_before(time_checked, timeout));
228
229         if (val == state)
230                 return 0;
231
232         return -ETIMEDOUT;
233 }
234
235 static void ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
236 {
237         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
238         struct phy *mphy = host->mphy;
239
240         if (!mphy)
241                 return;
242
243         if (on && !host->mphy_powered_on)
244                 phy_power_on(mphy);
245         else if (!on && host->mphy_powered_on)
246                 phy_power_off(mphy);
247         else
248                 return;
249         host->mphy_powered_on = on;
250 }
251
/**
 * ufs_mtk_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * On the gating path (!on, PRE_CHANGE) the ref-clk is released and the
 * M-PHY powered down only when the link is OFF or (about to be) in
 * Hibern8. On the ungating path (on, POST_CHANGE) the reverse order is
 * used. Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret = 0;
	bool clk_pwr_off = false;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off) {
			/* Gate the ref-clk before powering the M-PHY down. */
			ufs_mtk_setup_ref_clk(hba, on);
			ufs_mtk_mphy_power_on(hba, on);
		}
	} else if (on && status == POST_CHANGE) {
		/* Power-up order is the reverse of the power-down path. */
		ufs_mtk_mphy_power_on(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
	}

	return ret;
}
304
305 /**
306  * ufs_mtk_init - find other essential mmio bases
307  * @hba: host controller instance
308  *
309  * Binds PHY with controller and powers up PHY enabling clocks
310  * and regulators.
311  *
312  * Returns -EPROBE_DEFER if binding fails, returns negative error
313  * on phy power up failure and returns zero on success.
314  */
315 static int ufs_mtk_init(struct ufs_hba *hba)
316 {
317         struct ufs_mtk_host *host;
318         struct device *dev = hba->dev;
319         int err = 0;
320
321         host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
322         if (!host) {
323                 err = -ENOMEM;
324                 dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
325                 goto out;
326         }
327
328         host->hba = hba;
329         ufshcd_set_variant(hba, host);
330
331         err = ufs_mtk_bind_mphy(hba);
332         if (err)
333                 goto out_variant_clear;
334
335         /* Enable runtime autosuspend */
336         hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
337
338         /* Enable clock-gating */
339         hba->caps |= UFSHCD_CAP_CLK_GATING;
340
341         /* Enable inline encryption */
342         hba->caps |= UFSHCD_CAP_CRYPTO;
343
344         /* Enable WriteBooster */
345         hba->caps |= UFSHCD_CAP_WB_EN;
346         hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
347
348         /*
349          * ufshcd_vops_init() is invoked after
350          * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
351          * phy clock setup is skipped.
352          *
353          * Enable phy clocks specifically here.
354          */
355         ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
356
357         goto out;
358
359 out_variant_clear:
360         ufshcd_set_variant(hba, NULL);
361 out:
362         return err;
363 }
364
365 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
366                                   struct ufs_pa_layer_attr *dev_max_params,
367                                   struct ufs_pa_layer_attr *dev_req_params)
368 {
369         struct ufs_dev_params host_cap;
370         int ret;
371
372         host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
373         host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
374         host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
375         host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
376         host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
377         host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
378         host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
379         host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
380         host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
381         host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
382         host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
383         host_cap.desired_working_mode =
384                                 UFS_MTK_LIMIT_DESIRED_MODE;
385
386         ret = ufshcd_get_pwr_dev_param(&host_cap,
387                                        dev_max_params,
388                                        dev_req_params);
389         if (ret) {
390                 pr_info("%s: failed to determine capabilities\n",
391                         __func__);
392         }
393
394         return ret;
395 }
396
397 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
398                                      enum ufs_notify_change_status stage,
399                                      struct ufs_pa_layer_attr *dev_max_params,
400                                      struct ufs_pa_layer_attr *dev_req_params)
401 {
402         int ret = 0;
403
404         switch (stage) {
405         case PRE_CHANGE:
406                 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
407                                              dev_req_params);
408                 break;
409         case POST_CHANGE:
410                 break;
411         default:
412                 ret = -EINVAL;
413                 break;
414         }
415
416         return ret;
417 }
418
419 static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
420 {
421         int ret;
422         struct ufs_mtk_host *host = ufshcd_get_variant(hba);
423
424         ret = ufshcd_dme_set(hba,
425                              UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
426                              lpm);
427         if (!ret)
428                 host->unipro_lpm = lpm;
429
430         return ret;
431 }
432
433 static int ufs_mtk_pre_link(struct ufs_hba *hba)
434 {
435         int ret;
436         u32 tmp;
437
438         ufs_mtk_unipro_set_pm(hba, 0);
439
440         /*
441          * Setting PA_Local_TX_LCC_Enable to 0 before link startup
442          * to make sure that both host and device TX LCC are disabled
443          * once link startup is completed.
444          */
445         ret = ufshcd_disable_host_tx_lcc(hba);
446         if (ret)
447                 return ret;
448
449         /* disable deep stall */
450         ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
451         if (ret)
452                 return ret;
453
454         tmp &= ~(1 << 6);
455
456         ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
457
458         return ret;
459 }
460
461 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
462 {
463         unsigned long flags;
464         u32 ah_ms;
465
466         if (ufshcd_is_clkgating_allowed(hba)) {
467                 if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
468                         ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
469                                           hba->ahit);
470                 else
471                         ah_ms = 10;
472                 spin_lock_irqsave(hba->host->host_lock, flags);
473                 hba->clk_gating.delay_ms = ah_ms + 5;
474                 spin_unlock_irqrestore(hba->host->host_lock, flags);
475         }
476 }
477
478 static int ufs_mtk_post_link(struct ufs_hba *hba)
479 {
480         /* enable unipro clock gating feature */
481         ufs_mtk_cfg_unipro_cg(hba, true);
482
483         /* configure auto-hibern8 timer to 10ms */
484         if (ufshcd_is_auto_hibern8_supported(hba)) {
485                 ufshcd_auto_hibern8_update(hba,
486                         FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
487                         FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
488         }
489
490         ufs_mtk_setup_clk_gating(hba);
491
492         return 0;
493 }
494
495 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
496                                        enum ufs_notify_change_status stage)
497 {
498         int ret = 0;
499
500         switch (stage) {
501         case PRE_CHANGE:
502                 ret = ufs_mtk_pre_link(hba);
503                 break;
504         case POST_CHANGE:
505                 ret = ufs_mtk_post_link(hba);
506                 break;
507         default:
508                 ret = -EINVAL;
509                 break;
510         }
511
512         return ret;
513 }
514
/*
 * Pulse the device reset line (RST_n) through secure firmware:
 * assert (low), hold >= 10 us, deassert (high), then give the device
 * time to come back.
 */
static void ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* Assert reset: RST_n is active low. */
	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	/* Deassert reset. */
	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");
}
537
/*
 * Bring the link back to high-power mode on resume: re-enable the host
 * controller, leave UniPro low-power mode, exit hibern8 (marking the
 * link active) and make the HBA operational. Returns the first error.
 */
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_pm(hba, 0);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (err)
		return err;

	ufshcd_set_link_active(hba);

	return ufshcd_make_hba_operational(hba);
}
562
/*
 * Put UniPro into low-power mode for suspend. On failure, restore the
 * previous UniPro state so the subsequent error recovery starts from a
 * working link, then report the original error.
 */
static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err = ufs_mtk_unipro_set_pm(hba, 1);

	if (err)
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_pm(hba, 0);

	return err;
}
576
577 static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
578 {
579         if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
580                 return;
581
582         if (lpm & !hba->vreg_info.vcc->enabled)
583                 regulator_set_mode(hba->vreg_info.vccq2->reg,
584                                    REGULATOR_MODE_IDLE);
585         else if (!lpm)
586                 regulator_set_mode(hba->vreg_info.vccq2->reg,
587                                    REGULATOR_MODE_NORMAL);
588 }
589
/*
 * Vendor suspend hook. When the link is in hibern8, enter UniPro
 * low-power mode and drop vccq2 to idle. A UniPro failure forces the
 * link off and returns -EAGAIN so core performs a full host reset.
 */
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err) {
			/*
			 * Set link as off state enforcedly to trigger
			 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
			 * for completed host reset.
			 */
			ufshcd_set_link_off(hba);
			return -EAGAIN;
		}
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
	}

	return 0;
}
615
616 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
617 {
618         int err;
619
620         if (ufshcd_is_link_hibern8(hba)) {
621                 ufs_mtk_vreg_set_lpm(hba, false);
622                 err = ufs_mtk_link_set_hpm(hba);
623                 if (err) {
624                         err = ufshcd_link_recovery(hba);
625                         return err;
626                 }
627         }
628
629         return 0;
630 }
631
/*
 * Dump MediaTek-specific registers for debugging: ref-clk control, the
 * external register, the M-PHY control window, and one probe word
 * selected via REG_UFS_DEBUG_SEL.
 */
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump the whole REG_UFS_MPHYCTRL..REG_UFS_REJECT_MON window. */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
646
647 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
648 {
649         struct ufs_dev_info *dev_info = &hba->dev_info;
650         u16 mid = dev_info->wmanufacturerid;
651
652         if (mid == UFS_VENDOR_SAMSUNG)
653                 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
654
655         /*
656          * Decide waiting time before gating reference clock and
657          * after ungating reference clock according to vendors'
658          * requirements.
659          */
660         if (mid == UFS_VENDOR_SAMSUNG)
661                 ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
662         else if (mid == UFS_VENDOR_SKHYNIX)
663                 ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
664         else if (mid == UFS_VENDOR_TOSHIBA)
665                 ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
666
667         return 0;
668 }
669
/*
 * Apply the MediaTek quirk table, then clear HOST_PA_TACTIVATE for
 * Samsung parts — ufs_mtk_apply_dev_quirks() programs PA_TACTIVATE
 * for them explicitly, presumably making the generic quirk unnecessary
 * (NOTE(review): confirm against core quirk handling).
 */
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (mid == UFS_VENDOR_SAMSUNG)
		hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
}
680
/**
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization, clock/power transitions, link
 * startup, power-mode changes, device quirks, PM and device reset.
 */
static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
};
701
702 /**
703  * ufs_mtk_probe - probe routine of the driver
704  * @pdev: pointer to Platform device handle
705  *
706  * Return zero for success and non-zero for failure
707  */
708 static int ufs_mtk_probe(struct platform_device *pdev)
709 {
710         int err;
711         struct device *dev = &pdev->dev;
712
713         /* perform generic probe */
714         err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
715         if (err)
716                 dev_info(dev, "probe failed %d\n", err);
717
718         return err;
719 }
720
721 /**
722  * ufs_mtk_remove - set driver_data of the device to NULL
723  * @pdev: pointer to platform device handle
724  *
725  * Always return 0
726  */
727 static int ufs_mtk_remove(struct platform_device *pdev)
728 {
729         struct ufs_hba *hba =  platform_get_drvdata(pdev);
730
731         pm_runtime_get_sync(&(pdev)->dev);
732         ufshcd_remove(hba);
733         return 0;
734 }
735
736 static const struct of_device_id ufs_mtk_of_match[] = {
737         { .compatible = "mediatek,mt8183-ufshci"},
738         {},
739 };
740
/*
 * Delegate all system and runtime PM entry points to the generic ufshcd
 * platform helpers; the vendor-specific work runs through the suspend/
 * resume hooks in ufs_hba_mtk_vops.
 */
static const struct dev_pm_ops ufs_mtk_pm_ops = {
	.suspend         = ufshcd_pltfrm_suspend,
	.resume          = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};
748
/* Platform glue binding probe/remove/shutdown and PM ops to "ufshcd-mtk". */
static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};
759
MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

/* Register the driver; expands to the module init/exit boilerplate. */
module_platform_driver(ufs_mtk_pltform);