// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include "ufshcd.h"
#include "ufshcd-crypto.h"
#include "ufshcd-pltfrm.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

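/*
 * The helpers below wrap MediaTek SiP SMC calls (MTK_SIP_UFS_CONTROL):
 * platform-specific power, clock and reset controls are requested from
 * firmware, and the firmware's return value is reported back in @res.
 */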
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

#define ufs_mtk_va09_pwr_ctrl(res, on) \
	ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)

#define ufs_mtk_crypto_ctrl(res, enable) \
	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)

#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)

static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

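/*
 * Inline encryption is enabled via firmware. If the request fails, clear
 * UFSHCD_CAP_CRYPTO so the UFS core stops advertising inline-crypto support.
 */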
static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}

static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	unsigned long flags;

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			spin_lock_irqsave(hba->host->host_lock, flags);
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			spin_unlock_irqrestore(hba->host->host_lock,
					       flags);

			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}
	}

	return 0;
}

static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * The UFS driver might be probed before the phy driver.
		 * In that case, defer the probe and retry later.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}

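/*
 * Reference-clock handshake: the host writes REFCLK_REQUEST (or
 * REFCLK_RELEASE) to REG_UFS_REFCLK_CTRL and polls until the ACK bit
 * matches the request bit. Firmware is notified before the clock is
 * requested and after it is released.
 */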
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until the ack bit equals the req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
	else
		ufs_mtk_ref_clk_notify(on, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					   u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

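/*
 * REG_UFS_DEBUG_SEL routes internal monitor signals to REG_UFS_PROBE;
 * ufs_mtk_dbg_sel() must be called before reading the probe register so
 * that the link and host state machines can be observed.
 */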
static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}

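/*
 * Poll the host state machine through the debug probe until it leaves the
 * Hibern8 enter/exit range and settles back at the HCE base (idle) state.
 * ktime_get_mono_fast_ns() is used because this may run late in suspend.
 */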
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
				    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a specific time after check base */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * if state is in H8 enter and H8 enter confirm
		 * wait until return to idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	if (val == state)
		return 0;

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}

static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*clk_out = clk;
	return 0;
}

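/*
 * When boosting, the crypto engine clock mux is switched to its
 * high-performance parent and Vcore is raised through the dvfsrc-vcore
 * regulator; when not boosting, the low-power parent is selected and the
 * Vcore request is dropped.
 */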
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

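/*
 * Parse MediaTek-specific capabilities from the device tree. A purely
 * illustrative node (the unit address and property mix below are examples,
 * not a binding reference) might look like:
 *
 *	ufshci: ufs@11270000 {
 *		compatible = "mediatek,mt8183-ufshci";
 *		mediatek,ufs-boost-crypt;
 *		mediatek,ufs-support-va09;
 *		mediatek,ufs-disable-ah8;
 *		mediatek,ufs-broken-vcc;
 *	};
 */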
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_boost_crypt(hba, up);
	ufs_mtk_setup_ref_clk(hba, up);

	if (up)
		phy_power_on(host->mphy);
	else
		phy_power_off(host->mphy);
}

/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks; else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			 (!ufshcd_can_hibern8_during_gating(hba) &&
			 ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_scale_perf(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_scale_perf(hba, true);
	}

	return ret;
}

static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds the PHY to the controller and powers it up, enabling its clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error code on PHY
 * power-up failure, and zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;
	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

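/*
 * The host caps its maximum gear at HS-G4 and lets the UFS core negotiate
 * the final power mode against the device's capabilities; on hosts with
 * hw major version >= 3, PA adaptation is configured before the change.
 */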
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G4;
	host_cap.hs_tx_gear = UFS_HS_G4;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
						 dev_req_params->gear_tx,
						 PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Forcibly remain in non-LPM mode if the UIC command failed,
		 * so that the default hba_enable_delay_us value is used when
		 * re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}

static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	unsigned long flags;
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.delay_ms = ah_ms + 5;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}
}

static int ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);

	return 0;
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on the safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}

static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

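/*
 * While suspended, VCCQ2 is switched to regulator idle mode, but only when
 * VCC is already disabled; normal mode is restored on the resume path.
 */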
static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
		return;

	if (lpm && !hba->vreg_info.vcc->enabled)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_IDLE);
	else if (!lpm)
		regulator_set_mode(hba->vreg_info.vccq2->reg,
				   REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	unsigned long flags;
	int ret;

	/* disable auto-hibern8 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* wait for the host to return to idle state after auto-hibern8 off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
	enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (!ufshcd_is_auto_hibern8_supported(hba))
			return 0;
		ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		ufs_mtk_vreg_set_lpm(hba, true);
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	return 0;
fail:
	/*
	 * Force the link into the off state to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend(), which
	 * performs a complete host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	ufs_mtk_vreg_set_lpm(hba, false);

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);

	/*
	 * Decide the waiting time before gating the reference clock and
	 * after ungating it according to vendors' requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);

	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on thus we don't
		 * need any delay during regulator operations
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;

	trace_ufs_mtk_event(evt, val);
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
	.event_notify        = ufs_mtk_event_notify,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to platform device handle
 *
 * Returns zero for success and non-zero for failure.
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
			       DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0.
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);