/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
#include <asm/feature-fixups.h>
/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE
	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
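	/* A minimal sketch, assuming the callee-saved state plus the
	 * MSR and SDR1 are stowed first, mirroring the restore path
	 * at the end of swsusp_arch_resume:
	 */
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)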
	/* Get a stable timebase and save it */
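	/* A minimal sketch of the usual 32-bit timebase read loop:
	 * re-read TBU and retry if a carry propagated between the
	 * two halves while we were reading them.
	 */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs (assumed: SPRG0 first, SPRG1-3 follow below) */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)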
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)
	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)
	/* Back up various CPU config state */
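	/* Assumed counterpart of the __restore_cpu_setup call made in
	 * the resume path below:
	 */
	bl	__save_cpu_setup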
	/* Call the low level suspend stuff (we should probably have made
	 * a stackframe...)
	 */
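	/* Assumed to be the generic snapshot entry point: */
	bl	swsusp_save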
	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
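	/* A minimal sketch: reload LR and return, leaving the callee's
	 * return value in r3:
	 */
	lwz	r0,SL_LR(r11)
	mtlr	r0
	blr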
_GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	dssall
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * for a while be unusable. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings;
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate moving the
	 * BATs for these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync
	/* Load a ptr to the list of pages to copy in r3. Translation is
	 * off, so address restore_pblist physically; @l is unchanged
	 * since KERNELBASE is 64K-aligned.
	 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r3,0(r11)
	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache-efficient.
	 */
1:
	tophys(r3,r3)			/* pbe pointers are virtual */
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r11,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r10,r10)
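	/* A minimal sketch of the inner copy, assuming 4KiB pages
	 * moved a word at a time (the real loop is likely unrolled
	 * for throughput):
	 */
	li	r0,1024			/* 4096 bytes / 4 bytes per word */
	mtctr	r0
2:	lwz	r8,0(r11)
	stw	r8,0(r10)
	addi	r11,r11,4
	addi	r10,r10,4
	bdnz	2b

	/* Walk to the next list entry, if any */
	lwz	r3,pbe_next(r3)
	cmpwi	0,r3,0
	bne	1b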
	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
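	/* A minimal sketch, assuming we touch the low 4MiB one
	 * 32-byte line at a time to pull it into the dcache:
	 */
	lis	r3,0x0002		/* 0x20000 lines * 32B = 4MiB */
	mtctr	r3
	li	r3,0
1:	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync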
	/* Now flush those cache lines */
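	/* A minimal sketch, assuming the same range is flushed with
	 * dcbf so the icache can be refilled coherently:
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync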
	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical.
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
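	/* Assumed: convert to a physical address, since MSR:DR is
	 * still off at this point:
	 */
	tophys(r11,r11)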
	/* Restore various CPU config state */
	bl	__restore_cpu_setup
	/* Restore the BATs, and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same values to the BATs, so that should be fine,
	 * though a better solution will have to be found long-term.
	 */
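	/* Assumed: SDR1 and SPRG0 are restored first, mirroring the
	 * save-side layout:
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4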
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
BEGIN_MMU_FTR_SECTION
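	/* A minimal sketch, assuming the high BATs (4..7) are simply
	 * cleared on CPUs that have them:
	 */
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4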
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync
	/* restore the MSR and turn on the MMU */
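	/* A minimal sketch, assuming the saved MSR is handed to the
	 * turn_on_mmu helper at the end of this file, which returns
	 * here via rfi with translation enabled:
	 */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)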
	/* Kick decrementer */
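	/* Assumed: arm the decrementer with a minimal value so a
	 * timer interrupt is taken soon after resume:
	 */
	li	r0,1
	mtdec	r0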
	/* Restore the callee-saved registers and return */
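	/* A minimal sketch, mirroring the save-side layout: */
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0
	lwz	r0,SL_CR(r11)
	mtcr	r0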
	// XXX Note: we don't really need to call swsusp_resume
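	/* Assumed: resume reports success to the caller */
	li	r3,0
	blr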
/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
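/* A minimal sketch of the helper the FIXME above refers to: load the
 * return address into SRR0 and the saved MSR (passed in r3) into
 * SRR1, then rfi back to the caller with the MMU enabled.
 */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi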