1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 */
#include <linux/cpu_pm.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/omap-dma.h>
#include <linux/omap-gpmc.h>

#include <trace/events/power.h>

#include <asm/fncpy.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include "soc.h"
#include "common.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "omap-secure.h"
#include "sram.h"
#include "control.h"
#include "vc.h"
52 /* pm34xx errata defined in pm.h */
56 struct powerdomain *pwrdm;
61 struct list_head node;
64 static LIST_HEAD(pwrst_list);
/*
 * Pointer to the WFI routine copied into SRAM by omap_push_sram_idle();
 * executing WFI from SRAM is the workaround for erratum i581.
 */
void (*omap3_do_wfi_sram)(void);

/* Powerdomain handles looked up once in omap3_pm_init() for the idle path */
static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
71 static void omap3_core_save_context(void)
73 omap3_ctrl_save_padconf();
76 * Force write last pad into memory, as this can fail in some
77 * cases according to errata 1.157, 1.185
79 omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
80 OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
82 /* Save the Interrupt controller context */
83 omap_intc_save_context();
84 /* Save the GPMC context */
85 omap3_gpmc_save_context();
86 /* Save the system control module context, padconf already save above*/
87 omap3_control_save_context();
88 omap_dma_global_context_save();
/*
 * Restore the CORE-domain context saved by omap3_core_save_context(),
 * in reverse order (padconf itself is restored by hardware).
 */
static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
	omap_dma_global_context_restore();
}
103 * FIXME: This function should be called before entering off-mode after
104 * OMAP3 secure services have been accessed. Currently it is only called
105 * once during boot sequence, but this works as we are not using secure
108 static void omap3_save_secure_ram_context(void)
111 int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
113 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
115 * MPU next state must be set to POWER_ON temporarily,
116 * otherwise the WFI executed inside the ROM code
117 * will hang the system.
119 pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
120 ret = omap3_save_secure_ram(omap3_secure_ram_storage,
121 OMAP3_SAVE_SECURE_RAM_SZ);
122 pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
123 /* Following is for error tracking, it should not happen */
125 pr_err("save_secure_sram() returns %08x\n", ret);
132 static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
136 c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK |
137 OMAP3430_ST_IO_CHAIN_MASK);
139 return c ? IRQ_HANDLED : IRQ_NONE;
142 static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
147 * Clear all except ST_IO and ST_IO_CHAIN for wkup module,
148 * these are handled in a separate handler to avoid acking
149 * IO events before parsing in mux code
151 c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK |
152 OMAP3430_ST_IO_CHAIN_MASK));
153 c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0);
154 c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0);
155 if (omap_rev() > OMAP3430_REV_ES1_0) {
156 c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0);
157 c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0);
160 return c ? IRQ_HANDLED : IRQ_NONE;
163 static void omap34xx_save_context(u32 *save)
167 /* Read Auxiliary Control Register */
168 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
172 /* Read L2 AUX ctrl register */
173 asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
/*
 * Trampoline with the int-returning signature cpu_suspend() expects;
 * wraps the assembly suspend entry point. Always reports success.
 */
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);
	return 0;
}
184 void omap_sram_idle(void)
186 /* Variable to tell what needs to be saved and restored
187 * in omap_sram_idle*/
188 /* save_state = 0 => Nothing to save and restored */
189 /* save_state = 1 => Only L1 and logic lost */
190 /* save_state = 2 => Only L2 lost */
191 /* save_state = 3 => L1, L2 and logic lost */
193 int mpu_next_state = PWRDM_POWER_ON;
194 int per_next_state = PWRDM_POWER_ON;
195 int core_next_state = PWRDM_POWER_ON;
198 mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
199 switch (mpu_next_state) {
201 case PWRDM_POWER_RET:
202 /* No need to save context */
205 case PWRDM_POWER_OFF:
210 pr_err("Invalid mpu state in sram_idle\n");
215 if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
216 pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);
218 /* Enable IO-PAD and IO-CHAIN wakeups */
219 per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
220 core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
222 pwrdm_pre_transition(NULL);
225 if (per_next_state == PWRDM_POWER_OFF)
226 cpu_cluster_pm_enter();
229 if (core_next_state < PWRDM_POWER_ON) {
230 if (core_next_state == PWRDM_POWER_OFF) {
231 omap3_core_save_context();
232 omap3_cm_save_context();
236 /* Configure PMIC signaling for I2C4 or sys_off_mode */
237 omap3_vc_set_pmic_signaling(core_next_state);
239 omap3_intc_prepare_idle();
242 * On EMU/HS devices ROM code restores a SRDC value
243 * from scratchpad which has automatic self refresh on timeout
244 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
245 * Hence store/restore the SDRC_POWER register here.
247 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
248 (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
249 omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
250 core_next_state == PWRDM_POWER_OFF)
251 sdrc_pwr = sdrc_read_reg(SDRC_POWER);
254 * omap3_arm_context is the location where some ARM context
255 * get saved. The rest is placed on the stack, and restored
256 * from there before resuming.
259 omap34xx_save_context(omap3_arm_context);
260 if (save_state == 1 || save_state == 3)
261 cpu_suspend(save_state, omap34xx_do_sram_idle);
263 omap34xx_do_sram_idle(save_state);
265 /* Restore normal SDRC POWER settings */
266 if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
267 (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
268 omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
269 core_next_state == PWRDM_POWER_OFF)
270 sdrc_write_reg(sdrc_pwr, SDRC_POWER);
273 if (core_next_state < PWRDM_POWER_ON &&
274 pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
275 omap3_core_restore_context();
276 omap3_cm_restore_context();
277 omap3_sram_restore_context();
278 omap2_sms_restore_context();
281 * In off-mode resume path above, omap3_core_restore_context
282 * also handles the INTC autoidle restore done here so limit
283 * this to non-off mode resume paths so we don't do it twice.
285 omap3_intc_resume_idle();
288 pwrdm_post_transition(NULL);
291 if (per_next_state == PWRDM_POWER_OFF)
292 cpu_cluster_pm_exit();
295 static void omap3_pm_idle(void)
297 if (omap_irq_pending())
300 trace_cpu_idle_rcuidle(1, smp_processor_id());
304 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
#ifdef CONFIG_SUSPEND
/*
 * System suspend entry: program every powerdomain to its suspend target,
 * idle the SoC, then verify each domain reached its target and restore
 * the pre-suspend next-state programming. Returns 0 on success, -1 if
 * any powerdomain missed its target state.
 */
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);

	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		pr_err("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return ret;
}
#else
#define omap3_pm_suspend NULL
#endif /* CONFIG_SUSPEND */
350 static void __init prcm_setup_regs(void)
354 omap3_prm_init_pm(cpu_is_omap3630(), omap3_has_iva());
357 void omap3_pm_off_mode_enable(int enable)
359 struct power_state *pwrst;
363 state = PWRDM_POWER_OFF;
365 state = PWRDM_POWER_RET;
367 list_for_each_entry(pwrst, &pwrst_list, node) {
368 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
369 pwrst->pwrdm == core_pwrdm &&
370 state == PWRDM_POWER_OFF) {
371 pwrst->next_state = PWRDM_POWER_RET;
372 pr_warn("%s: Core OFF disabled due to errata i583\n",
375 pwrst->next_state = state;
377 omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
381 int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
383 struct power_state *pwrst;
385 list_for_each_entry(pwrst, &pwrst_list, node) {
386 if (pwrst->pwrdm == pwrdm)
387 return pwrst->next_state;
392 int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
394 struct power_state *pwrst;
396 list_for_each_entry(pwrst, &pwrst_list, node) {
397 if (pwrst->pwrdm == pwrdm) {
398 pwrst->next_state = state;
405 static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
407 struct power_state *pwrst;
412 pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
415 pwrst->pwrdm = pwrdm;
416 pwrst->next_state = PWRDM_POWER_RET;
417 list_add(&pwrst->node, &pwrst_list);
419 if (pwrdm_has_hdwr_sar(pwrdm))
420 pwrdm_enable_hdwr_sar(pwrdm);
422 return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
426 * Push functions to SRAM
428 * The minimum set of functions is pushed to SRAM for execution:
429 * - omap3_do_wfi for erratum i581 WA,
431 void omap_push_sram_idle(void)
433 omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
436 static void __init pm_errata_configure(void)
438 if (cpu_is_omap3630()) {
439 pm34xx_errata |= PM_RTA_ERRATUM_i608;
440 /* Enable the l2 cache toggling in sleep logic */
441 enable_omap3630_toggle_l2_on_restore();
442 if (omap_rev() < OMAP3630_REV_ES1_2)
443 pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
444 PM_PER_MEMORIES_ERRATUM_i582);
445 } else if (cpu_is_omap34xx()) {
446 pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
450 int __init omap3_pm_init(void)
452 struct power_state *pwrst, *tmp;
453 struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
456 if (!omap3_has_io_chain_ctrl())
457 pr_warn("PM: no software I/O chain control; some wakeups may be lost\n");
459 pm_errata_configure();
461 /* XXX prcm_setup_regs needs to be before enabling hw
462 * supervised mode for powerdomains */
465 ret = request_irq(omap_prcm_event_to_irq("wkup"),
466 _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);
469 pr_err("pm: Failed to request pm_wkup irq\n");
473 /* IO interrupt is shared with mux code */
474 ret = request_irq(omap_prcm_event_to_irq("io"),
475 _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
479 pr_err("pm: Failed to request pm_io irq\n");
483 ret = pwrdm_for_each(pwrdms_setup, NULL);
485 pr_err("Failed to setup powerdomains\n");
489 (void) clkdm_for_each(omap_pm_clkdms_setup, NULL);
491 mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
492 if (mpu_pwrdm == NULL) {
493 pr_err("Failed to get mpu_pwrdm\n");
498 neon_pwrdm = pwrdm_lookup("neon_pwrdm");
499 per_pwrdm = pwrdm_lookup("per_pwrdm");
500 core_pwrdm = pwrdm_lookup("core_pwrdm");
502 neon_clkdm = clkdm_lookup("neon_clkdm");
503 mpu_clkdm = clkdm_lookup("mpu_clkdm");
504 per_clkdm = clkdm_lookup("per_clkdm");
505 wkup_clkdm = clkdm_lookup("wkup_clkdm");
507 omap_common_suspend_init(omap3_pm_suspend);
509 arm_pm_idle = omap3_pm_idle;
513 * RTA is disabled during initialization as per erratum i608
514 * it is safer to disable RTA by the bootloader, but we would like
515 * to be doubly sure here and prevent any mishaps.
517 if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
518 omap3630_ctrl_disable_rta();
521 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
522 * not correctly reset when the PER powerdomain comes back
523 * from OFF or OSWR when the CORE powerdomain is kept active.
524 * See OMAP36xx Erratum i582 "PER Domain reset issue after
525 * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
526 * complete workaround. The kernel must also prevent the PER
527 * powerdomain from going to OSWR/OFF while the CORE
528 * powerdomain is not going to OSWR/OFF. And if PER last
529 * power state was off while CORE last power state was ON, the
530 * UART3/4 and McBSP2/3 SIDETONE devices need to run a
531 * self-test using their loopback tests; if that fails, those
532 * devices are unusable until the PER/CORE can complete a transition
533 * from ON to OSWR/OFF and then back to ON.
535 * XXX Technically this workaround is only needed if off-mode
536 * or OSWR is enabled.
538 if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
539 clkdm_add_wkdep(per_clkdm, wkup_clkdm);
541 clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
542 if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
543 omap3_secure_ram_storage =
544 kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
545 if (!omap3_secure_ram_storage)
546 pr_err("Memory allocation failed when allocating for secure sram context\n");
550 omap_dma_global_context_save();
551 omap3_save_secure_ram_context();
552 omap_dma_global_context_restore();
557 omap3_save_scratchpad_contents();
561 list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
562 list_del(&pwrst->node);
565 free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
567 free_irq(omap_prcm_event_to_irq("wkup"), NULL);