// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *      Stanley Chu <stanley.chu@mediatek.com>
 *      Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"

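/*
 * SiP (Silicon Provider) SMC helpers.
 *
 * ufs_mtk_smc() issues a MediaTek SiP SMC call into secure firmware
 * carrying a UFS control command and value; the wrappers below use it
 * to notify firmware about reference-clock state changes and to drive
 * the device reset line.
 */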
#define ufs_mtk_smc(cmd, val, res) \
        arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
                      cmd, val, 0, 0, 0, 0, 0, &(res))

#define ufs_mtk_ref_clk_notify(on, res) \
        ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

#define ufs_mtk_device_reset_ctrl(high, res) \
        ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)

static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
        UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
                UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
        END_FIX
};

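/*
 * Enable or disable UniPro local clock gating: set or clear the RX
 * symbol / system / TX clock gate enables in VS_SAVEPOWERCONTROL, and
 * release or force the TX symbol clock request accordingly.
 */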
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
        u32 tmp;

        if (enable) {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp |
                      (1 << RX_SYMBOL_CLK_GATE_EN) |
                      (1 << SYS_CLK_GATE_EN) |
                      (1 << TX_CLK_GATE_EN);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        } else {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
                              (1 << SYS_CLK_GATE_EN) |
                              (1 << TX_CLK_GATE_EN));
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        }
}

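/*
 * Select the host-enable delay before HCE is set: skip the delay when
 * UniPro is already in low-power mode, otherwise use 600 us.
 */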
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (status == PRE_CHANGE) {
                if (host->unipro_lpm)
                        hba->vps->hba_enable_delay_us = 0;
                else
                        hba->vps->hba_enable_delay_us = 600;
        }

        return 0;
}

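/*
 * Look up the M-PHY bound to this host in the device tree. A missing
 * PHY driver is reported as -EPROBE_DEFER so probing is retried later.
 */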
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device *dev = hba->dev;
        struct device_node *np = dev->of_node;
        int err = 0;

        host->mphy = devm_of_phy_get_by_index(dev, np, 0);

        if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * The UFS driver might be probed before the PHY driver
                 * is. In that case, return -EPROBE_DEFER so that probing
                 * is retried once the PHY is available.
                 */
                err = -EPROBE_DEFER;
                dev_info(dev,
                         "%s: required phy hasn't probed yet. err = %d\n",
                        __func__, err);
        } else if (IS_ERR(host->mphy)) {
                err = PTR_ERR(host->mphy);
                dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
        }

        if (err)
                host->mphy = NULL;

        return err;
}

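/*
 * Request or release the device reference clock through
 * REG_UFS_REFCLK_CTRL, notifying secure firmware around the transition
 * and polling until the controller acks the new request state.
 */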
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct arm_smccc_res res;
        unsigned long timeout;
        u32 value;

        if (host->ref_clk_enabled == on)
                return 0;

        if (on) {
                ufs_mtk_ref_clk_notify(on, res);
                ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
                ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
        } else {
                ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
        }

        /* Wait for ack */
        timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS);
        do {
                value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

                /* Wait until the ack bit equals the req bit */
                if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
                        goto out;

                usleep_range(100, 200);
        } while (time_before(jiffies, timeout));

        dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

        ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

        return -ETIMEDOUT;

out:
        host->ref_clk_enabled = on;
        if (!on) {
                ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
                ufs_mtk_ref_clk_notify(on, res);
        }

        return 0;
}

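/*
 * Record how long to wait before gating and after ungating the
 * reference clock. The device-reported gating wait time takes
 * precedence over the vendor default passed in by the caller.
 */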
static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
                                          u16 gating_us, u16 ungating_us)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (hba->dev_info.clk_gating_wait_us) {
                host->ref_clk_gating_wait_us =
                        hba->dev_info.clk_gating_wait_us;
        } else {
                host->ref_clk_gating_wait_us = gating_us;
        }

        host->ref_clk_ungating_wait_us = ungating_us;
}

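/*
 * Read the current link state from the top nibble of REG_UFS_PROBE,
 * after routing the relevant signals there via REG_UFS_DEBUG_SEL.
 */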
static u32 ufs_mtk_link_get_state(struct ufs_hba *hba)
{
        u32 val;

        ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        val = ufshcd_readl(hba, REG_UFS_PROBE);
        val = val >> 28;

        return val;
}

/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks, else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
                                enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        int ret = 0;

        /*
         * If ufs_mtk_init() has not finished yet, simply ignore this
         * call: ufs_mtk_setup_clocks() will be invoked again from
         * ufs_mtk_init() once initialization is done.
         */
        if (!host)
                return 0;

        if (!on && status == PRE_CHANGE) {
                if (!ufshcd_is_link_active(hba)) {
                        ufs_mtk_setup_ref_clk(hba, on);
                        ret = phy_power_off(host->mphy);
                } else {
                        /*
                         * Gate the ref-clk if the link is in Hibern8
                         * state triggered by Auto-Hibern8.
                         */
                        if (!ufshcd_can_hibern8_during_gating(hba) &&
                            ufshcd_is_auto_hibern8_enabled(hba) &&
                            ufs_mtk_link_get_state(hba) ==
                            VS_LINK_HIBERN8)
                                ufs_mtk_setup_ref_clk(hba, on);
                }
        } else if (on && status == POST_CHANGE) {
                ret = phy_power_on(host->mphy);
                ufs_mtk_setup_ref_clk(hba, on);
        }

        return ret;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY to the controller and powers up the PHY, enabling its
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host;
        struct device *dev = hba->dev;
        int err = 0;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
                goto out;
        }

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        err = ufs_mtk_bind_mphy(hba);
        if (err)
                goto out_variant_clear;

        /* Enable runtime autosuspend */
        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        /* Enable clock-gating */
        hba->caps |= UFSHCD_CAP_CLK_GATING;

        /* Enable WriteBooster */
        hba->caps |= UFSHCD_CAP_WB_EN;
        hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

        /*
         * ufshcd_vops_init() is invoked after ufshcd_setup_clock(true)
         * in ufshcd_hba_init(), so PHY clock setup was skipped there.
         *
         * Enable the PHY clocks explicitly here.
         */
        ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}

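/*
 * Clamp the negotiated power mode to this host's capabilities (lanes,
 * gears, power modes and high-speed rate) before the power change.
 */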
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
                                  struct ufs_pa_layer_attr *dev_max_params,
                                  struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_dev_params host_cap;
        int ret;

        host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
        host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
        host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
        host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
        host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
        host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
        host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
        host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
        host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
        host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
        host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
        host_cap.desired_working_mode =
                                UFS_MTK_LIMIT_DESIRED_MODE;

        ret = ufshcd_get_pwr_dev_param(&host_cap,
                                       dev_max_params,
                                       dev_req_params);
        if (ret) {
                pr_info("%s: failed to determine capabilities\n",
                        __func__);
        }

        return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status stage,
                                     struct ufs_pa_layer_attr *dev_max_params,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
                                             dev_req_params);
                break;
        case POST_CHANGE:
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

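/*
 * Enter (lpm = 1) or leave (lpm = 0) UniPro low-power mode through
 * VS_UNIPROPOWERDOWNCONTROL, caching the state on success.
 */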
static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
{
        int ret;
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ret = ufshcd_dme_set(hba,
                             UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
                             lpm);
        if (!ret)
                host->unipro_lpm = lpm;

        return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
        int ret;
        u32 tmp;

        ufs_mtk_unipro_set_pm(hba, 0);

        /*
         * Set PA_Local_TX_LCC_Enable to 0 before link startup to make
         * sure that both host and device TX LCC are disabled once link
         * startup completes.
         */
        ret = ufshcd_disable_host_tx_lcc(hba);
        if (ret)
                return ret;

        /* disable deep stall */
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
        if (ret)
                return ret;

        tmp &= ~(1 << 6);

        ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

        return ret;
}

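/*
 * Base the clk-gating delay on the auto-hibern8 idle timer plus a 5 ms
 * margin (10 ms + 5 ms when auto-hibern8 is unused or unsupported).
 */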
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
        unsigned long flags;
        u32 ah_ms;

        if (ufshcd_is_clkgating_allowed(hba)) {
                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
                        ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
                                          hba->ahit);
                else
                        ah_ms = 10;
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.delay_ms = ah_ms + 5;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
        }
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
        /* enable unipro clock gating feature */
        ufs_mtk_cfg_unipro_cg(hba, true);

        /* configure auto-hibern8 timer to 10ms */
        if (ufshcd_is_auto_hibern8_supported(hba)) {
                ufshcd_auto_hibern8_update(hba,
                        FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
                        FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
        }

        ufs_mtk_setup_clk_gating(hba);

        return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status stage)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_link(hba);
                break;
        case POST_CHANGE:
                ret = ufs_mtk_post_link(hba);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void ufs_mtk_device_reset(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        ufs_mtk_device_reset_ctrl(0, res);

        /*
         * The reset signal is active low. UFS devices shall detect a
         * positive or negative RST_n pulse that is 1 us or wider.
         *
         * To be on the safe side, keep reset low for at least 10 us.
         */
        usleep_range(10, 15);

        ufs_mtk_device_reset_ctrl(1, res);

        /* Some devices may need time to respond to rst_n */
        usleep_range(10000, 15000);

        dev_info(hba->dev, "device reset done\n");
}

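/*
 * Bring the link back to high-power mode: re-enable the host
 * controller, leave UniPro low-power mode, exit Hibern8 and make the
 * host operational again.
 */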
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
        int err;

        err = ufshcd_hba_enable(hba);
        if (err)
                return err;

        err = ufs_mtk_unipro_set_pm(hba, 0);
        if (err)
                return err;

        err = ufshcd_uic_hibern8_exit(hba);
        if (err)
                return err;

        ufshcd_set_link_active(hba);

        err = ufshcd_make_hba_operational(hba);
        if (err)
                return err;

        return 0;
}

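/*
 * Put the link into low-power mode; on failure, roll UniPro back to
 * the active state so error recovery can proceed.
 */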
static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
        int err;

        err = ufs_mtk_unipro_set_pm(hba, 1);
        if (err) {
                /* Restore the UniPro state for subsequent error recovery */
                ufs_mtk_unipro_set_pm(hba, 0);
                return err;
        }

        return 0;
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int err;
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_lpm(hba);
                if (err) {
                        /*
                         * Force the link into the off state so that
                         * ufshcd_suspend() triggers
                         * ufshcd_host_reset_and_restore() for a complete
                         * host reset.
                         */
                        ufshcd_set_link_off(hba);
                        return -EAGAIN;
                }
        }

        if (!ufshcd_is_link_active(hba))
                phy_power_off(host->mphy);

        return 0;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        int err;

        if (!ufshcd_is_link_active(hba))
                phy_power_on(host->mphy);

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_hpm(hba);
                if (err)
                        return ufshcd_link_recovery(hba);
        }

        return 0;
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

        ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

        ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
                         REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
                         "MPHY Ctrl ");

        /* Direct debugging information to REG_UFS_PROBE */
        ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
        struct ufs_dev_info *dev_info = &hba->dev_info;
        u16 mid = dev_info->wmanufacturerid;

        if (mid == UFS_VENDOR_SAMSUNG)
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);

        /*
         * Decide how long to wait before gating the reference clock and
         * after ungating it, according to each vendor's requirements.
         */
        if (mid == UFS_VENDOR_SAMSUNG)
                ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
        else if (mid == UFS_VENDOR_SKHYNIX)
                ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
        else if (mid == UFS_VENDOR_TOSHIBA)
                ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);

        return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
        struct ufs_dev_info *dev_info = &hba->dev_info;
        u16 mid = dev_info->wmanufacturerid;

        ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

        if (mid == UFS_VENDOR_SAMSUNG)
                hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
}

/**
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name                = "mediatek.ufshci",
        .init                = ufs_mtk_init,
        .setup_clocks        = ufs_mtk_setup_clocks,
        .hce_enable_notify   = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
        .pwr_change_notify   = ufs_mtk_pwr_change_notify,
        .apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
        .fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
        .suspend             = ufs_mtk_suspend,
        .resume              = ufs_mtk_resume,
        .dbg_register_dump   = ufs_mtk_dbg_register_dump,
        .device_reset        = ufs_mtk_device_reset,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Returns zero on success and non-zero on failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;

        /* perform generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
        if (err)
                dev_info(dev, "probe failed %d\n", err);

        return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0.
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        return 0;
}

static const struct of_device_id ufs_mtk_of_match[] = {
        { .compatible = "mediatek,mt8183-ufshci" },
        {},
};

static const struct dev_pm_ops ufs_mtk_pm_ops = {
        .suspend         = ufshcd_pltfrm_suspend,
        .resume          = ufshcd_pltfrm_resume,
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_mtk_pltform = {
        .probe      = ufs_mtk_probe,
        .remove     = ufs_mtk_remove,
        .shutdown   = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-mtk",
                .pm     = &ufs_mtk_pm_ops,
                .of_match_table = ufs_mtk_of_match,
        },
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);