// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include "ufshcd.h"
#include "ufshcd-crypto.h"
#include "ufshcd-pltfrm.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"

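/*
 * All platform-specific controls below (reference clock, inline crypto and
 * device reset) are routed to MediaTek SiP firmware through ARM SMCCC calls
 * against the MTK_SIP_UFS_CONTROL service; the firmware status is returned
 * in res.a0.
 */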
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

#define ufs_mtk_crypto_ctrl(res, enable) \
	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)

#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)

static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};

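/*
 * ufs_mtk_cfg_unipro_cg - toggle UniPro local clock gating by updating the
 * vendor-specific VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE attributes
 * over DME.
 */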
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp | (1 << RX_SYMBOL_CLK_GATE_EN) |
			    (1 << SYS_CLK_GATE_EN) |
			    (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

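/*
 * ufs_mtk_crypto_enable - ask SiP firmware to enable inline encryption.
 * If the SMC call fails, clear UFSHCD_CAP_CRYPTO so the core no longer
 * advertises crypto support.
 */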
static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm)
			hba->vps->hba_enable_delay_us = 0;
		else
			hba->vps->hba_enable_delay_us = 600;

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, return -EPROBE_DEFER so probing is retried.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev, "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}

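/*
 * ufs_mtk_setup_ref_clk - request or release the device reference clock
 * via REG_UFS_REFCLK_CTRL, polling until the ack bit matches the request
 * bit, and notify SiP firmware before ungating and after gating.
 */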
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us, u16 ungating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = ungating_us;
}

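/*
 * ufs_mtk_wait_link_state - poll the link state from the top nibble of
 * REG_UFS_PROBE (probe source 0x20 selected via REG_UFS_DEBUG_SEL) until
 * it matches @state or @max_wait_ms expires.
 */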
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	return -ETIMEDOUT;
}

static void ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;

	if (!mphy)
		return;

	if (on && !host->mphy_powered_on)
		phy_power_on(mphy);
	else if (!on && host->mphy_powered_on)
		phy_power_off(mphy);
	else
		return;
	host->mphy_powered_on = on;
}

/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret = 0;
	bool clk_pwr_off = false;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			   (!ufshcd_can_hibern8_during_gating(hba) &&
			    ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off) {
			ufs_mtk_setup_ref_clk(hba, on);
			ufs_mtk_mphy_power_on(hba, on);
		}
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_mphy_power_on(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
	}

	return ret;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host;
	struct device *dev = hba->dev;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_dev_params host_cap;
	int ret;

	host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
	host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
	host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
	host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
	host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
	host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
	host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
	host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
	host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
	host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
	host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
	host_cap.desired_working_mode =
				UFS_MTK_LIMIT_DESIRED_MODE;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

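/*
 * ufs_mtk_unipro_set_pm - enter (lpm = 1) or leave (lpm = 0) UniPro
 * low-power mode through the vendor-specific VS_UNIPROPOWERDOWNCONTROL
 * attribute; the cached state later selects the HCE enable delay.
 */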
static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm);
	if (!ret)
		host->unipro_lpm = lpm;

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_unipro_set_pm(hba, 0);

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	unsigned long flags;
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.delay_ms = ah_ms + 5;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* configure auto-hibern8 timer to 10ms */
	if (ufshcd_is_auto_hibern8_supported(hba)) {
		ufshcd_auto_hibern8_update(hba,
			FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
	}

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

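/*
 * ufs_mtk_device_reset - assert and release the device RST_n line through
 * SiP firmware to hardware-reset the attached UFS device.
 */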
static void ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_pm(hba, 0);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	err = ufs_mtk_unipro_set_pm(hba, 1);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_pm(hba, 0);
		return err;
	}

	return 0;
}

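/*
 * ufs_mtk_vreg_set_lpm - switch VCCQ2 to idle (low-power) regulator mode
 * while VCC is disabled during suspend, and back to normal mode on resume.
 */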
static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
		return;

	if (lpm && !hba->vreg_info.vcc->enabled)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_IDLE);
	else if (!lpm)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_NORMAL);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err) {
			/*
			 * Force the link into the OFF state to trigger
			 * ufshcd_host_reset_and_restore() in ufshcd_suspend(),
			 * so that a complete host reset is performed.
			 */
			ufshcd_set_link_off(hba);
			return -EAGAIN;
		}
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
	}

	return 0;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;

	if (ufshcd_is_link_hibern8(hba)) {
		ufs_mtk_vreg_set_lpm(hba, false);
		err = ufs_mtk_link_set_hpm(hba);
		if (err) {
			err = ufshcd_link_recovery(hba);
			return err;
		}
	}

	return 0;
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);

	/*
	 * Decide waiting time before gating reference clock and
	 * after ungating reference clock according to vendors'
	 * requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);

	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (mid == UFS_VENDOR_SAMSUNG)
		hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
}

/**
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);

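/*
 * For reference, a minimal devicetree node binding this driver might look
 * as follows (a sketch only; the unit address is illustrative, and the
 * required reg, clock, phy and regulator properties are omitted as
 * platform-specific):
 *
 *	ufshci: ufshci@11270000 {
 *		compatible = "mediatek,mt8183-ufshci";
 *	};
 */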
static const struct dev_pm_ops ufs_mtk_pm_ops = {
	.suspend         = ufshcd_pltfrm_suspend,
	.resume          = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);