1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
6 #include <linux/kernel.h>
9 #include <linux/delay.h>
12 #include <soc/tegra/mc.h>
14 #include "tegra210-emc.h"
15 #include "tegra210-mc.h"
/*
 * Verbosity class bits for emc_dbg() below.  Each message in this file is
 * tagged with one of these classes.  (The comment opener and an INFO-level
 * flag from the original file are not visible in this extract.)
 */
18 * Enable flags for specifying verbosity.
21 #define STEPS (1 << 1)
22 #define SUB_STEPS (1 << 2)
23 #define PRELOCK (1 << 3)
24 #define PRELOCK_STEPS (1 << 4)
25 #define ACTIVE_EN (1 << 5)
26 #define PRAMP_UP (1 << 6)
27 #define PRAMP_DN (1 << 7)
28 #define EMA_WRITES (1 << 10)
29 #define EMA_UPDATES (1 << 11)
30 #define PER_TRAIN (1 << 16)
31 #define CC_PRINT (1 << 17)
32 #define CCFIFO (1 << 29)
/*
 * NOTE(review): (1 << 31) left-shifts into the sign bit of a signed int,
 * which is undefined behavior in C; (1u << 31) or the kernel's BIT(31)
 * would be safer.  Harmless in practice on the compilers used, but worth
 * cleaning up.
 */
33 #define REGS (1 << 30)
34 #define REG_LISTS (1 << 31)
/*
 * Debug print helper.  NOTE: this definition forwards straight to
 * dev_dbg() and ignores the "flags" argument entirely, so the verbosity
 * classes above do not filter anything here — filtering is left to the
 * kernel's dynamic debug facility.
 */
36 #define emc_dbg(emc, flags, ...) dev_dbg(emc->dev, __VA_ARGS__)
/* Sequence version identifiers reported at clock-change time. */
38 #define DVFS_CLOCK_CHANGE_VERSION 21021
39 #define EMC_PRELOCK_VERSION 2101
/*
 * Sequence/type selectors passed to update_clock_tree_delay() and
 * periodic_compensation_handler().  These are members of an enum whose
 * opening brace and remaining enumerators (e.g. DVFS_SEQUENCE, DVFS_PT1,
 * DVFS_UPDATE, used below) are not visible in this extract.
 */
43 WRITE_TRAINING_SEQUENCE = 2,
44 PERIODIC_TRAINING_SEQUENCE = 3,
49 PERIODIC_TRAINING_UPDATE = 14
53 * PTFV defines - basically just indexes into the per table PTFV array.
/*
 * Indices 0-7 address the per-(channel, device, byte-half) moving-average
 * slots in a timing's ptfv_list[].  Judging by how they are paired with
 * the temp[channel][byte] values in update_clock_tree_delay(), the naming
 * is CxDyUz = channel x, device y, byte-half z — TODO confirm against the
 * TRM.  Indices 9-11 hold filter configuration rather than averages.
 */
55 #define PTFV_DQSOSC_MOVAVG_C0D0U0_INDEX 0
56 #define PTFV_DQSOSC_MOVAVG_C0D0U1_INDEX 1
57 #define PTFV_DQSOSC_MOVAVG_C0D1U0_INDEX 2
58 #define PTFV_DQSOSC_MOVAVG_C0D1U1_INDEX 3
59 #define PTFV_DQSOSC_MOVAVG_C1D0U0_INDEX 4
60 #define PTFV_DQSOSC_MOVAVG_C1D0U1_INDEX 5
61 #define PTFV_DQSOSC_MOVAVG_C1D1U0_INDEX 6
62 #define PTFV_DQSOSC_MOVAVG_C1D1U1_INDEX 7
/* Number of samples averaged per DVFS switch. */
63 #define PTFV_DVFS_SAMPLES_INDEX 9
/* Weight of the previous EMA value in __WEIGHTED_UPDATE_PTFV(). */
64 #define PTFV_MOVAVG_WEIGHT_INDEX 10
/* Control word; see PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA. */
65 #define PTFV_CONFIG_CTRL_INDEX 11
67 #define PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA (1 << 0)
/*
 * All moving averages are kept scaled by MOVAVG_PRECISION_FACTOR so the
 * arithmetic below can stay in integers (fixed point, two decimal digits).
 */
70 * Do arithmetic in fixed point.
72 #define MOVAVG_PRECISION_FACTOR 100
/*
 * Finalize an accumulated average: divide the fixed-point sum collected by
 * __INCREMENT_PTFV() by the configured sample count.  The result remains
 * scaled by MOVAVG_PRECISION_FACTOR; use __MOVAVG_AC() to read it back as
 * an integral value.  Operates on the function-local "next" timing.
 */
75 * The division portion of the average operation.
77 #define __AVERAGE_PTFV(dev) \
78 ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] = \
79 next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
80 next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
/* Accumulate one sample, scaled up to fixed point. */
83 * Convert val to fixed point and add it to the temporary average.
85 #define __INCREMENT_PTFV(dev, val) \
86 ({ next->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] += \
87 ((val) * MOVAVG_PRECISION_FACTOR); })
/* Strip the fixed-point scaling off a stored average. */
90 * Convert a moving average back to integral form and return the value.
92 #define __MOVAVG_AC(timing, dev) \
93 ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX] / \
94 MOVAVG_PRECISION_FACTOR)
/*
 * Exponential moving average update:
 *   new = (nval * FACTOR + old * weight) / (weight + 1)
 * where weight comes from ptfv_list[PTFV_MOVAVG_WEIGHT_INDEX].  Relies on
 * function-local "next" and "emc"; nval must be unsigned long to match the
 * %lu in the debug format.  (The macro's do/while wrapper lines are not
 * visible in this extract.)
 */
96 /* Weighted update. */
97 #define __WEIGHTED_UPDATE_PTFV(dev, nval) \
99 int w = PTFV_MOVAVG_WEIGHT_INDEX; \
100 int dqs = PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX; \
102 next->ptfv_list[dqs] = \
103 ((nval * MOVAVG_PRECISION_FACTOR) + \
104 (next->ptfv_list[dqs] * \
105 next->ptfv_list[w])) / \
106 (next->ptfv_list[w] + 1); \
108 emc_dbg(emc, EMA_UPDATES, "%s: (s=%lu) EMA: %u\n", \
109 __stringify(dev), nval, next->ptfv_list[dqs]); \
/* Raw (still fixed-point) accessor for one average slot. */
112 /* Access a particular average. */
113 #define __MOVAVG(timing, dev) \
114 ((timing)->ptfv_list[PTFV_DQSOSC_MOVAVG_ ## dev ## _INDEX])
/*
 * update_clock_tree_delay() - sample the DRAM DQS oscillators and fold the
 * readings into the per-timing clock tree delay state.
 * @emc:  EMC instance; supplies ->last / ->next timings and channel count.
 * @type: DVFS_PT1 (accumulate one sample), DVFS_UPDATE (finalize the
 *        accumulated average), or PERIODIC_TRAINING_UPDATE (EMA update).
 *
 * Each device's oscillator count is read back over MRR: MR19 supplies the
 * upper byte (shifted << 8 below) and MR18 the lower byte, one byte-half
 * per channel.  The count is converted to a delay relative to the last
 * timing's rate and pushed through the __*_PTFV() filter macros; when the
 * filtered value drifts from current_dram_clktree[] past the table margin,
 * the stored value is replaced.
 *
 * NOTE(review): several original lines (closing braces, the margin
 * operand of the drift comparisons, udelay calls and the final return —
 * presumably "return adel") are elided from this extract; do not treat
 * this copy as compilable.
 */
116 static u32 update_clock_tree_delay(struct tegra210_emc *emc, int type)
118 bool periodic_training_update = type == PERIODIC_TRAINING_UPDATE;
119 struct tegra210_emc_timing *last = emc->last;
120 struct tegra210_emc_timing *next = emc->next;
121 u32 last_timing_rate_mhz = last->rate / 1000;
122 u32 next_timing_rate_mhz = next->rate / 1000;
123 bool dvfs_update = type == DVFS_UPDATE;
124 s32 tdel = 0, tmdel = 0, adel = 0;
125 bool dvfs_pt1 = type == DVFS_PT1;
126 unsigned long cval = 0;
127 u32 temp[2][2], value;
/* Device 0: MR19 gives the MSBs of the oscillator count per channel. */
133 if (dvfs_pt1 || periodic_training_update) {
134 value = tegra210_emc_mrr_read(emc, 2, 19);
136 for (i = 0; i < emc->num_channels; i++) {
137 temp[i][0] = (value & 0x00ff) << 8;
138 temp[i][1] = (value & 0xff00) << 0;
/* Device 0: merge in the LSBs from MR18. */
145 value = tegra210_emc_mrr_read(emc, 2, 18);
147 for (i = 0; i < emc->num_channels; i++) {
148 temp[i][0] |= (value & 0x00ff) >> 0;
149 temp[i][1] |= (value & 0xff00) >> 8;
/* Channel 0 / device 0 / byte-half 0 (temp[0][0]). */
154 if (dvfs_pt1 || periodic_training_update) {
155 cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
157 cval /= last_timing_rate_mhz * 2 * temp[0][0];
161 __INCREMENT_PTFV(C0D0U0, cval);
162 else if (dvfs_update)
163 __AVERAGE_PTFV(C0D0U0);
164 else if (periodic_training_update)
165 __WEIGHTED_UPDATE_PTFV(C0D0U0, cval);
/* Adopt the filtered value if drift exceeds the (elided) margin. */
167 if (dvfs_update || periodic_training_update) {
168 tdel = next->current_dram_clktree[C0D0U0] -
169 __MOVAVG_AC(next, C0D0U0);
170 tmdel = (tdel < 0) ? -1 * tdel : tdel;
173 if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
175 next->current_dram_clktree[C0D0U0] =
176 __MOVAVG_AC(next, C0D0U0);
/* Channel 0 / device 0 / byte-half 1 (temp[0][1]). */
179 if (dvfs_pt1 || periodic_training_update) {
180 cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
182 cval /= last_timing_rate_mhz * 2 * temp[0][1];
186 __INCREMENT_PTFV(C0D0U1, cval);
187 else if (dvfs_update)
188 __AVERAGE_PTFV(C0D0U1);
189 else if (periodic_training_update)
190 __WEIGHTED_UPDATE_PTFV(C0D0U1, cval);
192 if (dvfs_update || periodic_training_update) {
193 tdel = next->current_dram_clktree[C0D0U1] -
194 __MOVAVG_AC(next, C0D0U1);
195 tmdel = (tdel < 0) ? -1 * tdel : tdel;
200 if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
202 next->current_dram_clktree[C0D0U1] =
203 __MOVAVG_AC(next, C0D0U1);
/* Second channel of device 0, both byte-halves (temp[1][..]). */
206 if (emc->num_channels > 1) {
207 if (dvfs_pt1 || periodic_training_update) {
208 cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
210 cval /= last_timing_rate_mhz * 2 * temp[1][0];
214 __INCREMENT_PTFV(C1D0U0, cval);
215 else if (dvfs_update)
216 __AVERAGE_PTFV(C1D0U0);
217 else if (periodic_training_update)
218 __WEIGHTED_UPDATE_PTFV(C1D0U0, cval);
220 if (dvfs_update || periodic_training_update) {
221 tdel = next->current_dram_clktree[C1D0U0] -
222 __MOVAVG_AC(next, C1D0U0);
223 tmdel = (tdel < 0) ? -1 * tdel : tdel;
228 if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
230 next->current_dram_clktree[C1D0U0] =
231 __MOVAVG_AC(next, C1D0U0);
234 if (dvfs_pt1 || periodic_training_update) {
235 cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
237 cval /= last_timing_rate_mhz * 2 * temp[1][1];
241 __INCREMENT_PTFV(C1D0U1, cval);
242 else if (dvfs_update)
243 __AVERAGE_PTFV(C1D0U1);
244 else if (periodic_training_update)
245 __WEIGHTED_UPDATE_PTFV(C1D0U1, cval);
247 if (dvfs_update || periodic_training_update) {
248 tdel = next->current_dram_clktree[C1D0U1] -
249 __MOVAVG_AC(next, C1D0U1);
250 tmdel = (tdel < 0) ? -1 * tdel : tdel;
255 if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
257 next->current_dram_clktree[C1D0U1] =
258 __MOVAVG_AC(next, C1D0U1);
/* Single-device systems are done here (branch target elided). */
262 if (emc->num_devices < 2)
/* Device 1: MR19 MSBs, note MRR device-select is 1 here vs. 2 above. */
268 if (dvfs_pt1 || periodic_training_update) {
269 value = tegra210_emc_mrr_read(emc, 1, 19);
271 for (i = 0; i < emc->num_channels; i++) {
272 temp[i][0] = (value & 0x00ff) << 8;
273 temp[i][1] = (value & 0xff00) << 0;
/*
 * NOTE(review): this device-1 LSB read passes device-select 2, while the
 * matching MSB read above uses 1 (the device-0 pair above uses 2 for
 * both).  This looks like a copy/paste slip — presumably it should be
 * tegra210_emc_mrr_read(emc, 1, 18).  Confirm against the TRM or the
 * downstream NVIDIA driver before changing.
 */
280 value = tegra210_emc_mrr_read(emc, 2, 18);
282 for (i = 0; i < emc->num_channels; i++) {
283 temp[i][0] |= (value & 0x00ff) >> 0;
284 temp[i][1] |= (value & 0xff00) >> 8;
/* Channel 0 / device 1, byte-half 0 then 1 — same pattern as device 0. */
289 if (dvfs_pt1 || periodic_training_update) {
290 cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
292 cval /= last_timing_rate_mhz * 2 * temp[0][0];
296 __INCREMENT_PTFV(C0D1U0, cval);
297 else if (dvfs_update)
298 __AVERAGE_PTFV(C0D1U0);
299 else if (periodic_training_update)
300 __WEIGHTED_UPDATE_PTFV(C0D1U0, cval);
302 if (dvfs_update || periodic_training_update) {
303 tdel = next->current_dram_clktree[C0D1U0] -
304 __MOVAVG_AC(next, C0D1U0);
305 tmdel = (tdel < 0) ? -1 * tdel : tdel;
310 if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
312 next->current_dram_clktree[C0D1U0] =
313 __MOVAVG_AC(next, C0D1U0);
316 if (dvfs_pt1 || periodic_training_update) {
317 cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
319 cval /= last_timing_rate_mhz * 2 * temp[0][1];
323 __INCREMENT_PTFV(C0D1U1, cval);
324 else if (dvfs_update)
325 __AVERAGE_PTFV(C0D1U1);
326 else if (periodic_training_update)
327 __WEIGHTED_UPDATE_PTFV(C0D1U1, cval);
329 if (dvfs_update || periodic_training_update) {
330 tdel = next->current_dram_clktree[C0D1U1] -
331 __MOVAVG_AC(next, C0D1U1);
332 tmdel = (tdel < 0) ? -1 * tdel : tdel;
337 if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
339 next->current_dram_clktree[C0D1U1] =
340 __MOVAVG_AC(next, C0D1U1);
/* Second channel of device 1, both byte-halves. */
343 if (emc->num_channels > 1) {
344 if (dvfs_pt1 || periodic_training_update) {
345 cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
347 cval /= last_timing_rate_mhz * 2 * temp[1][0];
351 __INCREMENT_PTFV(C1D1U0, cval);
352 else if (dvfs_update)
353 __AVERAGE_PTFV(C1D1U0);
354 else if (periodic_training_update)
355 __WEIGHTED_UPDATE_PTFV(C1D1U0, cval);
357 if (dvfs_update || periodic_training_update) {
358 tdel = next->current_dram_clktree[C1D1U0] -
359 __MOVAVG_AC(next, C1D1U0);
360 tmdel = (tdel < 0) ? -1 * tdel : tdel;
365 if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
367 next->current_dram_clktree[C1D1U0] =
368 __MOVAVG_AC(next, C1D1U0);
371 if (dvfs_pt1 || periodic_training_update) {
372 cval = tegra210_emc_actual_osc_clocks(last->run_clocks);
374 cval /= last_timing_rate_mhz * 2 * temp[1][1];
378 __INCREMENT_PTFV(C1D1U1, cval);
379 else if (dvfs_update)
380 __AVERAGE_PTFV(C1D1U1);
381 else if (periodic_training_update)
382 __WEIGHTED_UPDATE_PTFV(C1D1U1, cval);
384 if (dvfs_update || periodic_training_update) {
385 tdel = next->current_dram_clktree[C1D1U1] -
386 __MOVAVG_AC(next, C1D1U1);
387 tmdel = (tdel < 0) ? -1 * tdel : tdel;
392 if (tmdel * 128 * next_timing_rate_mhz / 1000000 >
394 next->current_dram_clktree[C1D1U1] =
395 __MOVAVG_AC(next, C1D1U1);
/*
 * periodic_compensation_handler() - run one compensation sequence and
 * return the resulting clock tree delay adjustment.
 * @emc:  EMC instance.
 * @type: DVFS_SEQUENCE or PERIODIC_TRAINING_SEQUENCE.
 * @last: timing currently programmed in hardware.
 * @next: timing being switched to; its ptfv_list[] filter state is
 *        updated in place.
 *
 * For DVFS_SEQUENCE the moving-average state is either seeded from
 * @last's EMA data (when the previous frequency also ran periodic
 * training and the table opts in via PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA)
 * or zeroed and rebuilt from PTFV_DVFS_SAMPLES_INDEX fresh oscillator
 * samples.  For PERIODIC_TRAINING_SEQUENCE a single measurement is taken
 * and applied as an EMA update.  The value returned by
 * update_clock_tree_delay() is propagated back (return statement elided
 * from this extract).
 */
403 static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
404 struct tegra210_emc_timing *last,
405 struct tegra210_emc_timing *next)
/*
 * Seed next's accumulator from last's finished average, pre-multiplied by
 * the sample count so that the later __AVERAGE_PTFV() division reproduces
 * the same value.
 */
407 #define __COPY_EMA(nt, lt, dev) \
408 ({ __MOVAVG(nt, dev) = __MOVAVG(lt, dev) * \
409 (nt)->ptfv_list[PTFV_DVFS_SAMPLES_INDEX]; })
411 u32 i, adel = 0, samples = next->ptfv_list[PTFV_DVFS_SAMPLES_INDEX];
/* Settle time (us) for one oscillator run; used with udelay() (elided). */
414 delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
416 delay = 2 + (delay / last->rate);
/* Nothing to do when the target timing does not train periodically. */
418 if (!next->periodic_training)
421 if (type == DVFS_SEQUENCE) {
422 if (last->periodic_training &&
423 (next->ptfv_list[PTFV_CONFIG_CTRL_INDEX] &
424 PTFV_CONFIG_CTRL_USE_PREVIOUS_EMA)) {
426 * If the previous frequency was using periodic
427 * calibration then we can reuse the previous
428 * frequency's EMA data.
430 __COPY_EMA(next, last, C0D0U0);
431 __COPY_EMA(next, last, C0D0U1);
432 __COPY_EMA(next, last, C1D0U0);
433 __COPY_EMA(next, last, C1D0U1);
434 __COPY_EMA(next, last, C0D1U0);
435 __COPY_EMA(next, last, C0D1U1);
436 __COPY_EMA(next, last, C1D1U0);
437 __COPY_EMA(next, last, C1D1U1);
/* Otherwise start the averages from scratch. */
440 __MOVAVG(next, C0D0U0) = 0;
441 __MOVAVG(next, C0D0U1) = 0;
442 __MOVAVG(next, C1D0U0) = 0;
443 __MOVAVG(next, C1D0U1) = 0;
444 __MOVAVG(next, C0D1U0) = 0;
445 __MOVAVG(next, C0D1U1) = 0;
446 __MOVAVG(next, C1D1U0) = 0;
447 __MOVAVG(next, C1D1U1) = 0;
/* Collect "samples" oscillator readings, accumulating via DVFS_PT1. */
449 for (i = 0; i < samples; i++) {
450 tegra210_emc_start_periodic_compensation(emc);
454 * Generate next sample of data.
456 adel = update_clock_tree_delay(emc, DVFS_PT1);
/* Finalize the accumulated average into the filter state. */
461 * Seems like it should be part of the
462 * 'if (last_timing->periodic_training)' conditional
463 * since is already done for the else clause.
465 adel = update_clock_tree_delay(emc, DVFS_UPDATE);
/* Periodic path: one measurement, applied as an EMA update. */
468 if (type == PERIODIC_TRAINING_SEQUENCE) {
469 tegra210_emc_start_periodic_compensation(emc);
472 adel = update_clock_tree_delay(emc, PERIODIC_TRAINING_UPDATE);
/*
 * tegra210_emc_r21021_periodic_compensation() - periodic clock tree drift
 * compensation for the currently programmed timing.
 * @emc: EMC instance.
 *
 * When the active timing has periodic training enabled: temporarily strip
 * power-saving features from EMC_CFG and disable the DLL, wait for the
 * DRAM to leave powerdown/self-refresh, kick off an oscillator
 * measurement, and — if the measured drift exceeds the table's
 * tree_margin — rewrite the OB DDLL long-DQ trimmers in list[] with
 * compensated values.  Finally restore EMC_CFG, EMC_CFG_UPDATE and the
 * DLL.  (The trailing return and some braces are elided from this
 * extract.)
 */
478 static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
480 u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
/* Trimmer registers rewritten when compensation is applied (step 5). */
482 EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
483 EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
484 EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
485 EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3,
486 EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0,
487 EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1,
488 EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2,
489 EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3,
493 struct tegra210_emc_timing *last = emc->last;
494 unsigned int items = ARRAY_SIZE(list), i;
497 if (last->periodic_training) {
498 emc_dbg(emc, PER_TRAIN, "Periodic training starting\n");
500 value = emc_readl(emc, EMC_DBG);
/*
 * NOTE(review): EMC_CFG_DRAM_CLKSTOP_PD appears twice in this mask; the
 * second instance was probably meant to be EMC_CFG_DRAM_CLKSTOP_SR.
 * Confirm against the TRM before changing — as written, clock-stop
 * self-refresh is left enabled during training.
 */
501 emc_cfg_o = emc_readl(emc, EMC_CFG);
502 emc_cfg = emc_cfg_o & ~(EMC_CFG_DYN_SELF_REF |
504 EMC_CFG_DRAM_CLKSTOP_PD |
505 EMC_CFG_DRAM_CLKSTOP_PD);
509 * 1. Power optimizations should be off.
511 emc_writel(emc, emc_cfg, EMC_CFG);
513 /* Does emc_timing_update() for above changes. */
514 tegra210_emc_dll_disable(emc);
/* Wait for all channels to leave powerdown and self-refresh. */
516 for (i = 0; i < emc->num_channels; i++)
517 tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
518 EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
521 for (i = 0; i < emc->num_channels; i++)
522 tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
523 EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
/* Save EMC_CFG_UPDATE and force DLL-in-update mode 2 for the run. */
526 emc_cfg_update = value = emc_readl(emc, EMC_CFG_UPDATE);
527 value &= ~EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_MASK;
528 value |= (2 << EMC_CFG_UPDATE_UPDATE_DLL_IN_UPDATE_SHIFT);
529 emc_writel(emc, value, EMC_CFG_UPDATE);
532 * 2. osc kick off - this assumes training and dvfs have set
535 tegra210_emc_start_periodic_compensation(emc);
538 * 3. Let dram capture its clock tree delays.
540 delay = tegra210_emc_actual_osc_clocks(last->run_clocks);
542 delay /= last->rate + 1;
546 * 4. Check delta wrt previous values (save value if margin
547 * exceeds what is set in table).
549 del = periodic_compensation_handler(emc,
550 PERIODIC_TRAINING_SEQUENCE,
554 * 5. Apply compensation w.r.t. trained values (if clock tree
555 * has drifted more than the set margin).
557 if (last->tree_margin < ((del * 128 * (last->rate / 1000)) / 1000000)) {
558 for (i = 0; i < items; i++) {
559 value = tegra210_emc_compensate(last, list[i]);
560 emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
562 emc_writel(emc, value, list[i]);
/* Restore the original EMC_CFG (power features back on). */
566 emc_writel(emc, emc_cfg_o, EMC_CFG);
569 * 6. Timing update actually applies the new trimmers.
571 tegra210_emc_timing_update(emc);
573 /* 6.1. Restore the UPDATE_DLL_IN_UPDATE field. */
574 emc_writel(emc, emc_cfg_update, EMC_CFG_UPDATE);
576 /* 6.2. Restore the DLL. */
577 tegra210_emc_dll_enable(emc);
584 * Do the clock change sequence.
586 static void tegra210_emc_r21021_set_clock(struct tegra210_emc *emc, u32 clksrc)
588 /* state variables */
589 static bool fsp_for_next_freq;
590 /* constant configuration parameters */
591 const bool save_restore_clkstop_pd = true;
592 const u32 zqcal_before_cc_cutoff = 2400;
593 const bool cya_allow_ref_cc = false;
594 const bool cya_issue_pc_ref = false;
595 const bool opt_cc_short_zcal = true;
596 const bool ref_b4_sref_en = false;
597 const u32 tZQCAL_lpddr4 = 1000000;
598 const bool opt_short_zcal = true;
599 const bool opt_do_sw_qrst = true;
600 const u32 opt_dvfs_mode = MAN_SR;
602 * This is the timing table for the source frequency. It does _not_
603 * necessarily correspond to the actual timing values in the EMC at the
604 * moment. If the boot BCT differs from the table then this can happen.
605 * However, we need it for accessing the dram_timings (which are not
606 * really registers) array for the current frequency.
608 struct tegra210_emc_timing *fake, *last = emc->last, *next = emc->next;
609 u32 tRTM, RP_war, R2P_war, TRPab_war, deltaTWATM, W2P_war, tRPST;
610 u32 mr13_flip_fspwr, mr13_flip_fspop, ramp_up_wait, ramp_down_wait;
611 u32 zq_wait_long, zq_latch_dvfs_wait_time, tZQCAL_lpddr4_fc_adj;
612 u32 emc_auto_cal_config, auto_cal_en, emc_cfg, emc_sel_dpd_ctrl;
613 u32 tFC_lpddr4 = 1000 * next->dram_timings[T_FC_LPDDR4];
614 u32 bg_reg_mode_change, enable_bglp_reg, enable_bg_reg;
615 bool opt_zcal_en_cc = false, is_lpddr3 = false;
616 bool compensate_trimmer_applicable = false;
617 u32 emc_dbg, emc_cfg_pipe_clk, emc_pin;
618 u32 src_clk_period, dst_clk_period; /* in picoseconds */
619 bool shared_zq_resistor = false;
620 u32 value, dram_type;
621 u32 opt_dll_mode = 0;
625 emc_dbg(emc, INFO, "Running clock change.\n");
627 /* XXX fake == last */
628 fake = tegra210_emc_find_timing(emc, last->rate * 1000UL);
629 fsp_for_next_freq = !fsp_for_next_freq;
631 value = emc_readl(emc, EMC_FBIO_CFG5) & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
632 dram_type = value >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
634 if (last->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] & BIT(31))
635 shared_zq_resistor = true;
637 if ((next->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0 &&
638 last->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0) ||
639 dram_type == DRAM_TYPE_LPDDR4)
640 opt_zcal_en_cc = true;
642 if (dram_type == DRAM_TYPE_DDR3)
643 opt_dll_mode = tegra210_emc_get_dll_state(next);
645 if ((next->burst_regs[EMC_FBIO_CFG5_INDEX] & BIT(25)) &&
646 (dram_type == DRAM_TYPE_LPDDR2))
649 emc_readl(emc, EMC_CFG);
650 emc_readl(emc, EMC_AUTO_CAL_CONFIG);
652 src_clk_period = 1000000000 / last->rate;
653 dst_clk_period = 1000000000 / next->rate;
655 if (dst_clk_period <= zqcal_before_cc_cutoff)
656 tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4 - tFC_lpddr4;
658 tZQCAL_lpddr4_fc_adj = tZQCAL_lpddr4;
660 tZQCAL_lpddr4_fc_adj /= dst_clk_period;
662 emc_dbg = emc_readl(emc, EMC_DBG);
663 emc_pin = emc_readl(emc, EMC_PIN);
664 emc_cfg_pipe_clk = emc_readl(emc, EMC_CFG_PIPE_CLK);
666 emc_cfg = next->burst_regs[EMC_CFG_INDEX];
667 emc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |
668 EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);
669 emc_sel_dpd_ctrl = next->emc_sel_dpd_ctrl;
670 emc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |
671 EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |
672 EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |
673 EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |
674 EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);
676 emc_dbg(emc, INFO, "Clock change version: %d\n",
677 DVFS_CLOCK_CHANGE_VERSION);
678 emc_dbg(emc, INFO, "DRAM type = %d\n", dram_type);
679 emc_dbg(emc, INFO, "DRAM dev #: %u\n", emc->num_devices);
680 emc_dbg(emc, INFO, "Next EMC clksrc: 0x%08x\n", clksrc);
681 emc_dbg(emc, INFO, "DLL clksrc: 0x%08x\n", next->dll_clk_src);
682 emc_dbg(emc, INFO, "last rate: %u, next rate %u\n", last->rate,
684 emc_dbg(emc, INFO, "last period: %u, next period: %u\n",
685 src_clk_period, dst_clk_period);
686 emc_dbg(emc, INFO, " shared_zq_resistor: %d\n", !!shared_zq_resistor);
687 emc_dbg(emc, INFO, " num_channels: %u\n", emc->num_channels);
688 emc_dbg(emc, INFO, " opt_dll_mode: %d\n", opt_dll_mode);
692 * Pre DVFS SW sequence.
694 emc_dbg(emc, STEPS, "Step 1\n");
695 emc_dbg(emc, STEPS, "Step 1.1: Disable DLL temporarily.\n");
697 value = emc_readl(emc, EMC_CFG_DIG_DLL);
698 value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
699 emc_writel(emc, value, EMC_CFG_DIG_DLL);
701 tegra210_emc_timing_update(emc);
703 for (i = 0; i < emc->num_channels; i++)
704 tegra210_emc_wait_for_update(emc, i, EMC_CFG_DIG_DLL,
705 EMC_CFG_DIG_DLL_CFG_DLL_EN, 0);
707 emc_dbg(emc, STEPS, "Step 1.2: Disable AUTOCAL temporarily.\n");
709 emc_auto_cal_config = next->emc_auto_cal_config;
710 auto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
711 emc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
712 emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;
713 emc_auto_cal_config |= EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;
714 emc_auto_cal_config |= auto_cal_en;
715 emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
716 emc_readl(emc, EMC_AUTO_CAL_CONFIG); /* Flush write. */
718 emc_dbg(emc, STEPS, "Step 1.3: Disable other power features.\n");
720 tegra210_emc_set_shadow_bypass(emc, ACTIVE);
721 emc_writel(emc, emc_cfg, EMC_CFG);
722 emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
723 tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
725 if (next->periodic_training) {
726 tegra210_emc_reset_dram_clktree_values(next);
728 for (i = 0; i < emc->num_channels; i++)
729 tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
730 EMC_EMC_STATUS_DRAM_IN_POWERDOWN_MASK,
733 for (i = 0; i < emc->num_channels; i++)
734 tegra210_emc_wait_for_update(emc, i, EMC_EMC_STATUS,
735 EMC_EMC_STATUS_DRAM_IN_SELF_REFRESH_MASK,
738 tegra210_emc_start_periodic_compensation(emc);
740 delay = 1000 * tegra210_emc_actual_osc_clocks(last->run_clocks);
741 udelay((delay / last->rate) + 2);
743 value = periodic_compensation_handler(emc, DVFS_SEQUENCE, fake,
745 value = (value * 128 * next->rate / 1000) / 1000000;
747 if (next->periodic_training && value > next->tree_margin)
748 compensate_trimmer_applicable = true;
751 emc_writel(emc, EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
752 tegra210_emc_set_shadow_bypass(emc, ACTIVE);
753 emc_writel(emc, emc_cfg, EMC_CFG);
754 emc_writel(emc, emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
755 emc_writel(emc, emc_cfg_pipe_clk | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,
757 emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp &
758 ~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,
759 EMC_FDPD_CTRL_CMD_NO_RAMP);
762 ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
763 EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) ^
764 (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
765 EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD)) ||
766 ((next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
767 EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) ^
768 (last->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
769 EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD));
771 (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
772 EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD) == 0;
774 (next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
775 EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD) == 0;
777 if (bg_reg_mode_change) {
779 emc_writel(emc, last->burst_regs
780 [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
781 ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
782 EMC_PMACRO_BG_BIAS_CTRL_0);
785 emc_writel(emc, last->burst_regs
786 [EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
787 ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
788 EMC_PMACRO_BG_BIAS_CTRL_0);
791 /* Check if we need to turn on VREF generator. */
792 if ((((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
793 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&
794 ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
795 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 1)) ||
796 (((last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
797 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&
798 ((next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
799 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) != 0))) {
801 next->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
802 u32 last_pad_tx_ctrl =
803 last->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
804 u32 next_dq_e_ivref, next_dqs_e_ivref;
806 next_dqs_e_ivref = pad_tx_ctrl &
807 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;
808 next_dq_e_ivref = pad_tx_ctrl &
809 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;
810 value = (last_pad_tx_ctrl &
811 ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &
812 ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |
813 next_dq_e_ivref | next_dqs_e_ivref;
814 emc_writel(emc, value, EMC_PMACRO_DATA_PAD_TX_CTRL);
816 } else if (bg_reg_mode_change) {
820 tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
826 emc_dbg(emc, STEPS, "Step 2\n");
828 if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] &
829 EMC_CFG_DIG_DLL_CFG_DLL_EN) {
830 emc_dbg(emc, INFO, "Prelock enabled for target frequency.\n");
831 value = tegra210_emc_dll_prelock(emc, clksrc);
832 emc_dbg(emc, INFO, "DLL out: 0x%03x\n", value);
834 emc_dbg(emc, INFO, "Disabling DLL for target frequency.\n");
835 tegra210_emc_dll_disable(emc);
840 * Prepare autocal for the clock change.
842 emc_dbg(emc, STEPS, "Step 3\n");
844 tegra210_emc_set_shadow_bypass(emc, ACTIVE);
845 emc_writel(emc, next->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);
846 emc_writel(emc, next->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);
847 emc_writel(emc, next->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);
848 emc_writel(emc, next->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);
849 emc_writel(emc, next->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);
850 emc_writel(emc, next->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);
851 emc_writel(emc, next->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);
852 tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
854 emc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |
856 emc_writel(emc, emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
860 * Update EMC_CFG. (??)
862 emc_dbg(emc, STEPS, "Step 4\n");
864 if (src_clk_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)
865 ccfifo_writel(emc, 1, EMC_SELF_REF, 0);
867 emc_writel(emc, next->emc_cfg_2, EMC_CFG_2);
871 * Prepare reference variables for ZQCAL regs.
873 emc_dbg(emc, STEPS, "Step 5\n");
875 if (dram_type == DRAM_TYPE_LPDDR4)
876 zq_wait_long = max((u32)1, div_o3(1000000, dst_clk_period));
877 else if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
878 zq_wait_long = max(next->min_mrs_wait,
879 div_o3(360000, dst_clk_period)) + 4;
880 else if (dram_type == DRAM_TYPE_DDR3)
881 zq_wait_long = max((u32)256,
882 div_o3(320000, dst_clk_period) + 2);
888 * Training code - removed.
890 emc_dbg(emc, STEPS, "Step 6\n");
894 * Program FSP reference registers and send MRWs to new FSPWR.
896 emc_dbg(emc, STEPS, "Step 7\n");
897 emc_dbg(emc, SUB_STEPS, "Step 7.1: Bug 200024907 - Patch RP R2P");
900 if (dram_type == DRAM_TYPE_LPDDR4) {
903 if (src_clk_period >= 1000000 / 1866) /* 535.91 ps */
906 if (src_clk_period >= 1000000 / 1600) /* 625.00 ps */
909 if (src_clk_period >= 1000000 / 1333) /* 750.19 ps */
912 if (src_clk_period >= 1000000 / 1066) /* 938.09 ps */
915 deltaTWATM = max_t(u32, div_o3(7500, src_clk_period), 8);
918 * Originally there was a + .5 in the tRPST calculation.
919 * However since we can't do FP in the kernel and the tRTM
920 * computation was in a floating point ceiling function, adding
921 * one to tRTP should be ok. There is no other source of non
922 * integer values, so the result was always going to be
923 * something for the form: f_ceil(N + .5) = N + 1;
925 tRPST = (last->emc_mrw & 0x80) >> 7;
926 tRTM = fake->dram_timings[RL] + div_o3(3600, src_clk_period) +
927 max_t(u32, div_o3(7500, src_clk_period), 8) + tRPST +
930 emc_dbg(emc, INFO, "tRTM = %u, EMC_RP = %u\n", tRTM,
931 next->burst_regs[EMC_RP_INDEX]);
933 if (last->burst_regs[EMC_RP_INDEX] < tRTM) {
934 if (tRTM > (last->burst_regs[EMC_R2P_INDEX] +
935 last->burst_regs[EMC_RP_INDEX])) {
936 R2P_war = tRTM - last->burst_regs[EMC_RP_INDEX];
937 RP_war = last->burst_regs[EMC_RP_INDEX];
938 TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
942 last->burst_regs[EMC_RP_INDEX] - 63;
944 if (TRPab_war < RP_war)
950 R2P_war = last->burst_regs[EMC_R2P_INDEX];
951 RP_war = last->burst_regs[EMC_RP_INDEX];
952 TRPab_war = last->burst_regs[EMC_TRPAB_INDEX];
955 if (RP_war < deltaTWATM) {
956 W2P_war = last->burst_regs[EMC_W2P_INDEX]
957 + deltaTWATM - RP_war;
959 RP_war = RP_war + W2P_war - 63;
960 if (TRPab_war < RP_war)
965 W2P_war = last->burst_regs[
969 if ((last->burst_regs[EMC_W2P_INDEX] ^ W2P_war) ||
970 (last->burst_regs[EMC_R2P_INDEX] ^ R2P_war) ||
971 (last->burst_regs[EMC_RP_INDEX] ^ RP_war) ||
972 (last->burst_regs[EMC_TRPAB_INDEX] ^ TRPab_war)) {
973 emc_writel(emc, RP_war, EMC_RP);
974 emc_writel(emc, R2P_war, EMC_R2P);
975 emc_writel(emc, W2P_war, EMC_W2P);
976 emc_writel(emc, TRPab_war, EMC_TRPAB);
979 tegra210_emc_timing_update(emc);
981 emc_dbg(emc, INFO, "Skipped WAR\n");
985 if (!fsp_for_next_freq) {
986 mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x80;
987 mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0x00;
989 mr13_flip_fspwr = (next->emc_mrw3 & 0xffffff3f) | 0x40;
990 mr13_flip_fspop = (next->emc_mrw3 & 0xffffff3f) | 0xc0;
993 if (dram_type == DRAM_TYPE_LPDDR4) {
994 emc_writel(emc, mr13_flip_fspwr, EMC_MRW3);
995 emc_writel(emc, next->emc_mrw, EMC_MRW);
996 emc_writel(emc, next->emc_mrw2, EMC_MRW2);
1001 * Program the shadow registers.
1003 emc_dbg(emc, STEPS, "Step 8\n");
1004 emc_dbg(emc, SUB_STEPS, "Writing burst_regs\n");
1006 for (i = 0; i < next->num_burst; i++) {
1007 const u16 *offsets = emc->offsets->burst;
1013 value = next->burst_regs[i];
1014 offset = offsets[i];
1016 if (dram_type != DRAM_TYPE_LPDDR4 &&
1017 (offset == EMC_MRW6 || offset == EMC_MRW7 ||
1018 offset == EMC_MRW8 || offset == EMC_MRW9 ||
1019 offset == EMC_MRW10 || offset == EMC_MRW11 ||
1020 offset == EMC_MRW12 || offset == EMC_MRW13 ||
1021 offset == EMC_MRW14 || offset == EMC_MRW15 ||
1022 offset == EMC_TRAINING_CTRL))
1025 /* Pain... And suffering. */
1026 if (offset == EMC_CFG) {
1027 value &= ~EMC_CFG_DRAM_ACPD;
1028 value &= ~EMC_CFG_DYN_SELF_REF;
1030 if (dram_type == DRAM_TYPE_LPDDR4) {
1031 value &= ~EMC_CFG_DRAM_CLKSTOP_SR;
1032 value &= ~EMC_CFG_DRAM_CLKSTOP_PD;
1034 } else if (offset == EMC_MRS_WAIT_CNT &&
1035 dram_type == DRAM_TYPE_LPDDR2 &&
1036 opt_zcal_en_cc && !opt_cc_short_zcal &&
1038 value = (value & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<
1039 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |
1040 ((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<
1041 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
1042 } else if (offset == EMC_ZCAL_WAIT_CNT &&
1043 dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&
1044 !opt_cc_short_zcal && opt_short_zcal) {
1045 value = (value & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
1046 EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
1047 ((zq_wait_long & EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
1048 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
1049 } else if (offset == EMC_ZCAL_INTERVAL && opt_zcal_en_cc) {
1050 value = 0; /* EMC_ZCAL_INTERVAL reset value. */
1051 } else if (offset == EMC_PMACRO_AUTOCAL_CFG_COMMON) {
1052 value |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;
1053 } else if (offset == EMC_PMACRO_DATA_PAD_TX_CTRL) {
1054 value &= ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
1055 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
1056 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
1057 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
1058 } else if (offset == EMC_PMACRO_CMD_PAD_TX_CTRL) {
1059 value |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
1060 value &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
1061 EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
1062 EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
1063 EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
1064 } else if (offset == EMC_PMACRO_BRICK_CTRL_RFU1) {
1065 value &= 0xf800f800;
1066 } else if (offset == EMC_PMACRO_COMMON_PAD_TX_CTRL) {
1067 value &= 0xfffffff0;
1070 emc_writel(emc, value, offset);
1073 /* SW addition: do EMC refresh adjustment here. */
1074 tegra210_emc_adjust_timing(emc, next);
1076 if (dram_type == DRAM_TYPE_LPDDR4) {
1077 value = (23 << EMC_MRW_MRW_MA_SHIFT) |
1078 (next->run_clocks & EMC_MRW_MRW_OP_MASK);
1079 emc_writel(emc, value, EMC_MRW);
1082 /* Per channel burst registers. */
1083 emc_dbg(emc, SUB_STEPS, "Writing burst_regs_per_ch\n");
1085 for (i = 0; i < next->num_burst_per_ch; i++) {
1086 const struct tegra210_emc_per_channel_regs *burst =
1087 emc->offsets->burst_per_channel;
1089 if (!burst[i].offset)
1092 if (dram_type != DRAM_TYPE_LPDDR4 &&
1093 (burst[i].offset == EMC_MRW6 ||
1094 burst[i].offset == EMC_MRW7 ||
1095 burst[i].offset == EMC_MRW8 ||
1096 burst[i].offset == EMC_MRW9 ||
1097 burst[i].offset == EMC_MRW10 ||
1098 burst[i].offset == EMC_MRW11 ||
1099 burst[i].offset == EMC_MRW12 ||
1100 burst[i].offset == EMC_MRW13 ||
1101 burst[i].offset == EMC_MRW14 ||
1102 burst[i].offset == EMC_MRW15))
1105 /* Filter out second channel if not in DUAL_CHANNEL mode. */
1106 if (emc->num_channels < 2 && burst[i].bank >= 1)
1109 emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
1110 next->burst_reg_per_ch[i], burst[i].offset);
1111 emc_channel_writel(emc, burst[i].bank,
1112 next->burst_reg_per_ch[i],
1117 emc_dbg(emc, SUB_STEPS, "Writing vref_regs\n");
1119 for (i = 0; i < next->vref_num; i++) {
1120 const struct tegra210_emc_per_channel_regs *vref =
1121 emc->offsets->vref_per_channel;
1123 if (!vref[i].offset)
1126 if (emc->num_channels < 2 && vref[i].bank >= 1)
1129 emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
1130 next->vref_perch_regs[i], vref[i].offset);
1131 emc_channel_writel(emc, vref[i].bank, next->vref_perch_regs[i],
1136 emc_dbg(emc, SUB_STEPS, "Writing trim_regs\n");
1138 for (i = 0; i < next->num_trim; i++) {
1139 const u16 *offsets = emc->offsets->trim;
1144 if (compensate_trimmer_applicable &&
1145 (offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
1146 offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
1147 offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
1148 offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
1149 offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
1150 offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
1151 offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
1152 offsets[i] == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
1153 offsets[i] == EMC_DATA_BRLSHFT_0 ||
1154 offsets[i] == EMC_DATA_BRLSHFT_1)) {
1155 value = tegra210_emc_compensate(next, offsets[i]);
1156 emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
1158 emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n",
1159 (u32)(u64)offsets[i], value);
1160 emc_writel(emc, value, offsets[i]);
1162 emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
1163 next->trim_regs[i], offsets[i]);
1164 emc_writel(emc, next->trim_regs[i], offsets[i]);
1168 /* Per channel trimmers. */
1169 emc_dbg(emc, SUB_STEPS, "Writing trim_regs_per_ch\n");
1171 for (i = 0; i < next->num_trim_per_ch; i++) {
1172 const struct tegra210_emc_per_channel_regs *trim =
1173 &emc->offsets->trim_per_channel[0];
1174 unsigned int offset;
1176 if (!trim[i].offset)
1179 if (emc->num_channels < 2 && trim[i].bank >= 1)
1182 offset = trim[i].offset;
1184 if (compensate_trimmer_applicable &&
1185 (offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0 ||
1186 offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1 ||
1187 offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2 ||
1188 offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3 ||
1189 offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0 ||
1190 offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1 ||
1191 offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2 ||
1192 offset == EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3 ||
1193 offset == EMC_DATA_BRLSHFT_0 ||
1194 offset == EMC_DATA_BRLSHFT_1)) {
1195 value = tegra210_emc_compensate(next, offset);
1196 emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
1198 emc_dbg(emc, EMA_WRITES, "0x%08x <= 0x%08x\n", offset,
1200 emc_channel_writel(emc, trim[i].bank, value, offset);
1202 emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
1203 next->trim_perch_regs[i], offset);
1204 emc_channel_writel(emc, trim[i].bank,
1205 next->trim_perch_regs[i], offset);
1209 emc_dbg(emc, SUB_STEPS, "Writing burst_mc_regs\n");
1211 for (i = 0; i < next->num_mc_regs; i++) {
1212 const u16 *offsets = emc->offsets->burst_mc;
1213 u32 *values = next->burst_mc_regs;
1215 emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
1216 values[i], offsets[i]);
1217 mc_writel(emc->mc, values[i], offsets[i]);
1220 /* Registers to be programmed on the faster clock. */
1221 if (next->rate < last->rate) {
1222 const u16 *la = emc->offsets->la_scale;
1224 emc_dbg(emc, SUB_STEPS, "Writing la_scale_regs\n");
1226 for (i = 0; i < next->num_up_down; i++) {
1227 emc_dbg(emc, REG_LISTS, "(%u) 0x%08x => 0x%08x\n", i,
1228 next->la_scale_regs[i], la[i]);
1229 mc_writel(emc->mc, next->la_scale_regs[i], la[i]);
1233 /* Flush all the burst register writes. */
1234 mc_readl(emc->mc, MC_EMEM_ADR_CFG);
1240 emc_dbg(emc, STEPS, "Step 9\n");
1242 value = next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
1243 value &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;
1245 if (dram_type == DRAM_TYPE_LPDDR4) {
1246 emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
1247 emc_writel(emc, value, EMC_ZCAL_WAIT_CNT);
1249 value = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE |
1250 EMC_DBG_WRITE_ACTIVE_ONLY);
1252 emc_writel(emc, value, EMC_DBG);
1253 emc_writel(emc, 0, EMC_ZCAL_INTERVAL);
1254 emc_writel(emc, emc_dbg, EMC_DBG);
1259 * LPDDR4 and DDR3 common section.
1261 emc_dbg(emc, STEPS, "Step 10\n");
1263 if (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {
1264 if (dram_type == DRAM_TYPE_LPDDR4)
1265 ccfifo_writel(emc, 0x101, EMC_SELF_REF, 0);
1267 ccfifo_writel(emc, 0x1, EMC_SELF_REF, 0);
1269 if (dram_type == DRAM_TYPE_LPDDR4 &&
1270 dst_clk_period <= zqcal_before_cc_cutoff) {
1271 ccfifo_writel(emc, mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);
1272 ccfifo_writel(emc, (next->burst_regs[EMC_MRW6_INDEX] &
1274 (last->burst_regs[EMC_MRW6_INDEX] &
1275 0x0000C0C0), EMC_MRW6, 0);
1276 ccfifo_writel(emc, (next->burst_regs[EMC_MRW14_INDEX] &
1278 (last->burst_regs[EMC_MRW14_INDEX] &
1279 0x00003838), EMC_MRW14, 0);
1281 if (emc->num_devices > 1) {
1283 (next->burst_regs[EMC_MRW7_INDEX] &
1285 (last->burst_regs[EMC_MRW7_INDEX] &
1286 0x0000C0C0), EMC_MRW7, 0);
1288 (next->burst_regs[EMC_MRW15_INDEX] &
1290 (last->burst_regs[EMC_MRW15_INDEX] &
1291 0x00003838), EMC_MRW15, 0);
1294 if (opt_zcal_en_cc) {
1295 if (emc->num_devices < 2)
1297 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
1298 | EMC_ZQ_CAL_ZQ_CAL_CMD,
1300 else if (shared_zq_resistor)
1302 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT
1303 | EMC_ZQ_CAL_ZQ_CAL_CMD,
1307 EMC_ZQ_CAL_ZQ_CAL_CMD,
1313 if (dram_type == DRAM_TYPE_LPDDR4) {
1314 value = (1000 * fake->dram_timings[T_RP]) / src_clk_period;
1315 ccfifo_writel(emc, mr13_flip_fspop | 0x8, EMC_MRW3, value);
1316 ccfifo_writel(emc, 0, 0, tFC_lpddr4 / src_clk_period);
1319 if (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {
1322 if (cya_allow_ref_cc) {
1323 delay += (1000 * fake->dram_timings[T_RP]) /
1325 delay += 4000 * fake->dram_timings[T_RFC];
1328 ccfifo_writel(emc, emc_pin & ~(EMC_PIN_PIN_CKE_PER_DEV |
1334 /* calculate reference delay multiplier */
1340 if (cya_allow_ref_cc)
1343 if (cya_issue_pc_ref)
1346 if (dram_type != DRAM_TYPE_LPDDR4) {
1347 delay = ((1000 * fake->dram_timings[T_RP] / src_clk_period) +
1348 (1000 * fake->dram_timings[T_RFC] / src_clk_period));
1349 delay = value * delay + 20;
1358 emc_dbg(emc, STEPS, "Step 11\n");
1360 ccfifo_writel(emc, 0x0, EMC_CFG_SYNC, delay);
1362 value = emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE | EMC_DBG_WRITE_ACTIVE_ONLY;
1363 ccfifo_writel(emc, value, EMC_DBG, 0);
1365 ramp_down_wait = tegra210_emc_dvfs_power_ramp_down(emc, src_clk_period,
1370 * And finally - trigger the clock change.
1372 emc_dbg(emc, STEPS, "Step 12\n");
1374 ccfifo_writel(emc, 1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);
1375 value &= ~EMC_DBG_WRITE_ACTIVE_ONLY;
1376 ccfifo_writel(emc, value, EMC_DBG, 0);
1382 emc_dbg(emc, STEPS, "Step 13\n");
1384 ramp_up_wait = tegra210_emc_dvfs_power_ramp_up(emc, dst_clk_period, 0);
1385 ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
1391 emc_dbg(emc, STEPS, "Step 14\n");
1393 if (dram_type == DRAM_TYPE_LPDDR4) {
1394 value = emc_pin | EMC_PIN_PIN_CKE;
1396 if (emc->num_devices <= 1)
1397 value &= ~(EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV);
1399 value |= EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE_PER_DEV;
1401 ccfifo_writel(emc, value, EMC_PIN, 0);
1405 * Step 15: (two step 15s ??)
1406 * Calculate zqlatch wait time; has dependency on ramping times.
1408 emc_dbg(emc, STEPS, "Step 15\n");
1410 if (dst_clk_period <= zqcal_before_cc_cutoff) {
1411 s32 t = (s32)(ramp_up_wait + ramp_down_wait) /
1412 (s32)dst_clk_period;
1413 zq_latch_dvfs_wait_time = (s32)tZQCAL_lpddr4_fc_adj - t;
1415 zq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -
1416 div_o3(1000 * next->dram_timings[T_PDEX],
1420 emc_dbg(emc, INFO, "tZQCAL_lpddr4_fc_adj = %u\n", tZQCAL_lpddr4_fc_adj);
1421 emc_dbg(emc, INFO, "dst_clk_period = %u\n",
1423 emc_dbg(emc, INFO, "next->dram_timings[T_PDEX] = %u\n",
1424 next->dram_timings[T_PDEX]);
1425 emc_dbg(emc, INFO, "zq_latch_dvfs_wait_time = %d\n",
1426 max_t(s32, 0, zq_latch_dvfs_wait_time));
1428 if (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {
1429 delay = div_o3(1000 * next->dram_timings[T_PDEX],
1432 if (emc->num_devices < 2) {
1433 if (dst_clk_period > zqcal_before_cc_cutoff)
1435 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
1436 EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
1439 value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
1440 ccfifo_writel(emc, value, EMC_MRW3, delay);
1441 ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
1442 ccfifo_writel(emc, 0, EMC_REF, 0);
1443 ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
1444 EMC_ZQ_CAL_ZQ_LATCH_CMD,
1446 max_t(s32, 0, zq_latch_dvfs_wait_time));
1447 } else if (shared_zq_resistor) {
1448 if (dst_clk_period > zqcal_before_cc_cutoff)
1450 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
1451 EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
1454 ccfifo_writel(emc, 2UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
1455 EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
1456 max_t(s32, 0, zq_latch_dvfs_wait_time) +
1458 ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
1459 EMC_ZQ_CAL_ZQ_LATCH_CMD,
1462 value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
1463 ccfifo_writel(emc, value, EMC_MRW3, 0);
1464 ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
1465 ccfifo_writel(emc, 0, EMC_REF, 0);
1467 ccfifo_writel(emc, 1UL << EMC_ZQ_CAL_DEV_SEL_SHIFT |
1468 EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
1469 tZQCAL_lpddr4 / dst_clk_period);
1471 if (dst_clk_period > zqcal_before_cc_cutoff)
1472 ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_CAL_CMD,
1475 value = (mr13_flip_fspop & 0xfffffff7) | 0x0c000000;
1476 ccfifo_writel(emc, value, EMC_MRW3, delay);
1477 ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
1478 ccfifo_writel(emc, 0, EMC_REF, 0);
1480 ccfifo_writel(emc, EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
1481 max_t(s32, 0, zq_latch_dvfs_wait_time));
1485 /* WAR: delay for zqlatch */
1486 ccfifo_writel(emc, 0, 0, 10);
1490 * LPDDR4 Conditional Training Kickoff. Removed.
1495 * MANSR exit self refresh.
1497 emc_dbg(emc, STEPS, "Step 17\n");
1499 if (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)
1500 ccfifo_writel(emc, 0, EMC_SELF_REF, 0);
1504 * Send MRWs to LPDDR3/DDR3.
1506 emc_dbg(emc, STEPS, "Step 18\n");
1508 if (dram_type == DRAM_TYPE_LPDDR2) {
1509 ccfifo_writel(emc, next->emc_mrw2, EMC_MRW2, 0);
1510 ccfifo_writel(emc, next->emc_mrw, EMC_MRW, 0);
1512 ccfifo_writel(emc, next->emc_mrw4, EMC_MRW4, 0);
1513 } else if (dram_type == DRAM_TYPE_DDR3) {
1515 ccfifo_writel(emc, next->emc_emrs &
1516 ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);
1517 ccfifo_writel(emc, next->emc_emrs2 &
1518 ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);
1519 ccfifo_writel(emc, next->emc_mrs |
1520 EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);
1525 * ZQCAL for LPDDR3/DDR3
1527 emc_dbg(emc, STEPS, "Step 19\n");
1529 if (opt_zcal_en_cc) {
1530 if (dram_type == DRAM_TYPE_LPDDR2) {
1531 value = opt_cc_short_zcal ? 90000 : 360000;
1532 value = div_o3(value, dst_clk_period);
1534 EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |
1536 EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;
1537 ccfifo_writel(emc, value, EMC_MRS_WAIT_CNT2, 0);
1539 value = opt_cc_short_zcal ? 0x56 : 0xab;
1540 ccfifo_writel(emc, 2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
1541 EMC_MRW_USE_MRW_EXT_CNT |
1542 10 << EMC_MRW_MRW_MA_SHIFT |
1543 value << EMC_MRW_MRW_OP_SHIFT,
1546 if (emc->num_devices > 1) {
1547 value = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
1548 EMC_MRW_USE_MRW_EXT_CNT |
1549 10 << EMC_MRW_MRW_MA_SHIFT |
1550 value << EMC_MRW_MRW_OP_SHIFT;
1551 ccfifo_writel(emc, value, EMC_MRW, 0);
1553 } else if (dram_type == DRAM_TYPE_DDR3) {
1554 value = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;
1556 ccfifo_writel(emc, value |
1557 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
1558 EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
1561 if (emc->num_devices > 1) {
1562 value = value | 1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
1563 EMC_ZQ_CAL_ZQ_CAL_CMD;
1564 ccfifo_writel(emc, value, EMC_ZQ_CAL, 0);
1569 if (bg_reg_mode_change) {
1570 tegra210_emc_set_shadow_bypass(emc, ACTIVE);
1572 if (ramp_up_wait <= 1250000)
1573 delay = (1250000 - ramp_up_wait) / dst_clk_period;
1578 next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX],
1579 EMC_PMACRO_BG_BIAS_CTRL_0, delay);
1580 tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
1585 * Issue ref and optional QRST.
1587 emc_dbg(emc, STEPS, "Step 20\n");
1589 if (dram_type != DRAM_TYPE_LPDDR4)
1590 ccfifo_writel(emc, 0, EMC_REF, 0);
1592 if (opt_do_sw_qrst) {
1593 ccfifo_writel(emc, 1, EMC_ISSUE_QRST, 0);
1594 ccfifo_writel(emc, 0, EMC_ISSUE_QRST, 2);
1599 * Restore ZCAL and ZCAL interval.
1601 emc_dbg(emc, STEPS, "Step 21\n");
1603 if (save_restore_clkstop_pd || opt_zcal_en_cc) {
1604 ccfifo_writel(emc, emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
1606 if (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)
1607 ccfifo_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
1608 EMC_ZCAL_INTERVAL, 0);
1610 if (save_restore_clkstop_pd)
1611 ccfifo_writel(emc, next->burst_regs[EMC_CFG_INDEX] &
1612 ~EMC_CFG_DYN_SELF_REF,
1614 ccfifo_writel(emc, emc_dbg, EMC_DBG, 0);
1619 * Restore EMC_CFG_PIPE_CLK.
1621 emc_dbg(emc, STEPS, "Step 22\n");
1623 ccfifo_writel(emc, emc_cfg_pipe_clk, EMC_CFG_PIPE_CLK, 0);
1625 if (bg_reg_mode_change) {
1628 next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
1629 ~EMC_PMACRO_BG_BIAS_CTRL_0_BGLP_E_PWRD,
1630 EMC_PMACRO_BG_BIAS_CTRL_0);
1633 next->burst_regs[EMC_PMACRO_BG_BIAS_CTRL_0_INDEX] &
1634 ~EMC_PMACRO_BG_BIAS_CTRL_0_BG_E_PWRD,
1635 EMC_PMACRO_BG_BIAS_CTRL_0);
1641 emc_dbg(emc, STEPS, "Step 23\n");
1643 value = emc_readl(emc, EMC_CFG_DIG_DLL);
1644 value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
1645 value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
1646 value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
1647 value &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
1648 value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
1649 (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
1650 emc_writel(emc, value, EMC_CFG_DIG_DLL);
1652 tegra210_emc_do_clock_change(emc, clksrc);
1656 * Save training results. Removed.
1661 * Program MC updown registers.
1663 emc_dbg(emc, STEPS, "Step 25\n");
1665 if (next->rate > last->rate) {
1666 for (i = 0; i < next->num_up_down; i++)
1667 mc_writel(emc->mc, next->la_scale_regs[i],
1668 emc->offsets->la_scale[i]);
1670 tegra210_emc_timing_update(emc);
1675 * Restore ZCAL registers.
1677 emc_dbg(emc, STEPS, "Step 26\n");
1679 if (dram_type == DRAM_TYPE_LPDDR4) {
1680 tegra210_emc_set_shadow_bypass(emc, ACTIVE);
1681 emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
1683 emc_writel(emc, next->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
1685 tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
1688 if (dram_type != DRAM_TYPE_LPDDR4 && opt_zcal_en_cc &&
1689 !opt_short_zcal && opt_cc_short_zcal) {
1692 tegra210_emc_set_shadow_bypass(emc, ACTIVE);
1693 if (dram_type == DRAM_TYPE_LPDDR2)
1694 emc_writel(emc, next->burst_regs[EMC_MRS_WAIT_CNT_INDEX],
1696 else if (dram_type == DRAM_TYPE_DDR3)
1697 emc_writel(emc, next->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
1699 tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
1704 * Restore EMC_CFG, FDPD registers.
1706 emc_dbg(emc, STEPS, "Step 27\n");
1708 tegra210_emc_set_shadow_bypass(emc, ACTIVE);
1709 emc_writel(emc, next->burst_regs[EMC_CFG_INDEX], EMC_CFG);
1710 tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
1711 emc_writel(emc, next->emc_fdpd_ctrl_cmd_no_ramp,
1712 EMC_FDPD_CTRL_CMD_NO_RAMP);
1713 emc_writel(emc, next->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
1717 * Training recover. Removed.
1719 emc_dbg(emc, STEPS, "Step 28\n");
1721 tegra210_emc_set_shadow_bypass(emc, ACTIVE);
1723 next->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],
1724 EMC_PMACRO_AUTOCAL_CFG_COMMON);
1725 tegra210_emc_set_shadow_bypass(emc, ASSEMBLY);
1731 emc_dbg(emc, STEPS, "Step 29\n");
1733 emc_writel(emc, EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |
1734 EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |
1735 EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |
1736 EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |
1737 EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |
1738 EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |
1739 EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |
1740 EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,
1741 EMC_PMACRO_CFG_PM_GLOBAL_0);
1742 emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,
1743 EMC_PMACRO_TRAINING_CTRL_0);
1744 emc_writel(emc, EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,
1745 EMC_PMACRO_TRAINING_CTRL_1);
1746 emc_writel(emc, 0, EMC_PMACRO_CFG_PM_GLOBAL_0);
1750 * Re-enable autocal.
1752 emc_dbg(emc, STEPS, "Step 30: Re-enable DLL and AUTOCAL\n");
1754 if (next->burst_regs[EMC_CFG_DIG_DLL_INDEX] & EMC_CFG_DIG_DLL_CFG_DLL_EN) {
1755 value = emc_readl(emc, EMC_CFG_DIG_DLL);
1756 value |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
1757 value |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
1758 value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
1759 value &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
1760 value = (value & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
1761 (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
1762 emc_writel(emc, value, EMC_CFG_DIG_DLL);
1763 tegra210_emc_timing_update(emc);
1766 emc_writel(emc, next->emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
1771 const struct tegra210_emc_sequence tegra210_emc_r21021 = {
1773 .set_clock = tegra210_emc_r21021_set_clock,
1774 .periodic_compensation = tegra210_emc_r21021_periodic_compensation,