/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"

#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

#if defined(CONFIG_SMP) && defined(CONFIG_PM)
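
/*
 * The secure monitor calls below go through a small DO_SMC helper.
 * This is a minimal sketch of that helper, following the upstream
 * sleep44xx.S convention: barriers around SMC #0 so that all memory
 * traffic completes before and after entering the monitor. The service
 * index is passed in r12 and arguments in r0-r3 by the callers below.
 */
.macro	DO_SMC
	dsb
	smc	#0
	dsb
.endm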

#ifdef CONFIG_ARCH_OMAP4

/*
 * =============================
 * ==  CPU suspend finisher   ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU
 * power-down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for CPU OFF and DORMANT power states.
 * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
 * from this follows a full CPU reset path via ROM code to CPU restore code.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 * It returns to the caller for CPU INACTIVE and ON power states or in case
 * the CPU failed to transition to the targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
 * a stack frame and expects the caller to take care of it. Hence the
 * entire stack frame is saved to avoid possible stack corruption.
 */
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0			@ Low-power state requested?
	beq	do_WFI				@ No low-power state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ clean secure L1
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC					@ Secure monitor cleans secure L1
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	bl	v7_invalidate_l1

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0				@ Keep SAR RAM base in r8
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f			@ Extract CPU ID
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC					@ Program SCU power mode via monitor
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f			@ Extract CPU ID
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here since
	 * they use spinlocks. We are out of coherency here with the data
	 * cache disabled. The spinlock implementation uses exclusive
	 * load/store instructions which can fail without the data cache
	 * being enabled. OMAP4 hardware doesn't support an exclusive
	 * monitor which could overcome the exclusive access issue.
	 * Because of this, the CPU can deadlock.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0				@ Keep SAR RAM base in r8
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ands	r5, r5, #0x0f			@ Extract CPU ID
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
	cmp	r0, #3				@ Clean/inv L2 only for MPUSS OFF
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x03			@ Enable PL310 debug control
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0				@ Keep L2 cache base in r2
	ldr	r0, =0xffff			@ All sixteen ways
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1			@ Poll until all ways are done
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00			@ Disable PL310 debug control
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
l2x_sync:
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1			@ Wait for the sync to complete
	bne	sync
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * CPU is here when it failed to enter OFF/DORMANT or
	 * no low power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)			@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the CPU power state is set to NORMAL in
	 * SCU power state so that CPU is back in coherency.
	 * In non-coherent mode CPU can lock-up and lead to
	 * system deadlock.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0				@ Keep SAR RAM base in r8
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC					@ Restore SCU power mode via monitor
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
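
/*
 * For reference, the finisher above is not called directly; the C code
 * (see omap-mpuss-lowpower.c) hands it to the generic ARM suspend
 * helper, roughly:
 *
 *	cpu_suspend(save_state, omap4_finish_suspend);
 *
 * cpu_suspend() saves the core context and invokes the finisher with
 * save_state in r0, which is the cpu_state argument documented above.
 */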

/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of this function
 * is stored in the SAR RAM while entering the OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1, a secure PPA API is provided. CPU0 must be ON
	 * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5
	ands	r0, r0, #0x0f			@ Continue only on CPU1
	beq	skip_ns_smp_enable
ppa_actrl_retry:
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r1, ppa_zero_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_zero_params
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC					@ PPA call enables NS SMP access
	cmp	r0, #0x0			@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
	 * register. r0 contains the value to be programmed.
	 * L2 cache is already invalidated by ROM code as part
	 * of the MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	bne	set_gp_por
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr	r1, ppa_por_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_por_params
	str	r4, [r3, #0x04]			@ Pass saved prefetch value
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC					@ PPA call programs L2 POR
	b	set_aux_ctrl
set_gp_por:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume			@ Jump to generic resume
ppa_por_params_offset:
	.long	ppa_por_params - .
ENDPROC(omap4_cpu_resume)
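
/*
 * For reference, the wakeup pointer consumed by ROM code is programmed
 * from C during low-power entry, along the lines of (a sketch; the SAR
 * offset is per-CPU, e.g. CPU0_WAKEUP_NS_PA_ADDR_OFFSET from
 * omap4-sar-layout.h):
 *
 *	writel_relaxed(__pa_symbol(omap4_cpu_resume),
 *		       sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET);
 */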
#endif	/* CONFIG_ARCH_OMAP4 */

#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */

ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl	omap_interconnect_sync
#endif

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can speculatively
	 * prefetch the instructions so add NOPs after WFI. Sixteen
	 * NOPs as per Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ppa_zero_params_offset:
	.long	ppa_zero_params - .
ENDPROC(omap_do_wfi)
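
/*
 * Parameter blocks passed to the PPA secure services above. The
 * ppa_zero_params_offset/ppa_por_params_offset labels hold PC-relative
 * offsets so the code stays position independent; the data itself lives
 * in .data. A minimal layout following the upstream file: one zero word
 * for the ACTRL/SMP call, and two words for the L2 POR call, whose
 * second slot receives the saved prefetch value stored above.
 */
	.data
	.align	2
ppa_zero_params:
	.word	0

ppa_por_params:
	.word	1, 0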