// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/trans_pgd.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't hold with KASLR: the boot kernel's data is laid out differently,
 * so nothing guarantees the restored kernel sees a 0 here. We therefore set
 * it to 0 explicitly on the resume path.
 *
 * Defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_nvhe())

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
        char            uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
        struct arch_hibernate_hdr_invariants invariants;

        /* These are needed to find the relocated kernel if built with KASLR */
        phys_addr_t     ttbr1_el1;
        void            (*reenter_kernel)(void);

        /*
         * We need to know where the __hyp_stub_vectors are after restore to
         * re-configure el2.
         */
        phys_addr_t     __hyp_stub_vectors;

        u64             sleep_cpu_mpidr;
} resume_hdr;

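/*
 * Fill @i with the values that must match between the kernel that created
 * the image and the kernel resuming it: here, the uts_version string, which
 * includes the build number and build date.
 */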
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
        memset(i, 0, sizeof(*i));
        memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

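/*
 * Pages in the kernel's .nosave section, and any pages belonging to a loaded
 * crash kernel, must not be included in the hibernate image.
 */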
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
        unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

        return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
                crash_is_nosave(pfn);
}

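/*
 * Nothing to do here: the CPU context is saved and restored by the
 * cpu_suspend()/cpu_resume() machinery instead.
 */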
void notrace save_processor_state(void)
{
}

void notrace restore_processor_state(void)
{
}

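/*
 * Save the arch-specific header: the invariants above, the page tables and
 * re-entry point the resume kernel must use, the location of the hyp-stub
 * vectors, and the mpidr of the CPU we hibernated on.
 */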
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct arch_hibernate_hdr *hdr = addr;

        if (max_size < sizeof(*hdr))
                return -EOVERFLOW;

        arch_hdr_invariants(&hdr->invariants);
        hdr->ttbr1_el1          = __pa_symbol(swapper_pg_dir);
        hdr->reenter_kernel     = _cpu_resume;

        /* We can't use __hyp_get_vectors() because kvm may still be loaded */
        if (el2_reset_needed())
                hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
        else
                hdr->__hyp_stub_vectors = 0;

        /* Save the mpidr of the cpu we called cpu_suspend() on... */
        if (sleep_cpu < 0) {
                pr_err("Failing to hibernate on an unknown CPU.\n");
                return -ENODEV;
        }
        hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
        pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
                hdr->sleep_cpu_mpidr);

        return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

int arch_hibernation_header_restore(void *addr)
{
        int ret;
        struct arch_hibernate_hdr_invariants invariants;
        struct arch_hibernate_hdr *hdr = addr;

        arch_hdr_invariants(&invariants);
        if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
                pr_crit("Hibernate image not generated by this kernel!\n");
                return -EINVAL;
        }

        sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
        pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
                hdr->sleep_cpu_mpidr);
        if (sleep_cpu < 0) {
                pr_crit("Hibernated on a CPU not known to this kernel!\n");
                sleep_cpu = -EINVAL;
                return -EINVAL;
        }

        ret = bringup_hibernate_cpu(sleep_cpu);
        if (ret) {
                sleep_cpu = -EINVAL;
                return ret;
        }

        resume_hdr = *hdr;

        return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

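/*
 * trans_pgd allocator callback. Page-table pages must come from
 * get_safe_page() so that the hibernate core won't overwrite them while the
 * image is being restored.
 */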
static void *hibernate_page_alloc(void *arg)
{
        return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}

/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps the page as executable via a new set of
 * ttbr0 page tables that idmap it at its physical address.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
                                 phys_addr_t *phys_dst_addr)
{
        struct trans_pgd_info trans_info = {
                .trans_alloc_page       = hibernate_page_alloc,
                .trans_alloc_arg        = (__force void *)GFP_ATOMIC,
        };

        void *page = (void *)get_safe_page(GFP_ATOMIC);
        phys_addr_t trans_ttbr0;
        unsigned long t0sz;
        int rc;

        if (!page)
                return -ENOMEM;

        memcpy(page, src_start, length);
        caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
        rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
        if (rc)
                return rc;

        cpu_install_ttbr0(trans_ttbr0, t0sz);
        *phys_dst_addr = virt_to_phys(page);

        return 0;
}

#ifdef CONFIG_ARM64_MTE

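/*
 * The hibernate image only carries normal page contents, not MTE allocation
 * tags, so the tags are saved separately: into an xarray keyed by pfn on the
 * way down, and written back after the image has been restored.
 */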
static DEFINE_XARRAY(mte_pages);

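/* Save the MTE tags of @page into the mte_pages xarray, keyed by @pfn. */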
static int save_tags(struct page *page, unsigned long pfn)
{
        void *tag_storage, *ret;

        tag_storage = mte_allocate_tag_storage();
        if (!tag_storage)
                return -ENOMEM;

        mte_save_page_tags(page_address(page), tag_storage);

        ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
        if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
                mte_free_tag_storage(tag_storage);
                return xa_err(ret);
        } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
                mte_free_tag_storage(ret);
        }

        return 0;
}

static void swsusp_mte_free_storage(void)
{
        XA_STATE(xa_state, &mte_pages, 0);
        void *tags;

        xa_lock(&mte_pages);
        xas_for_each(&xa_state, tags, ULONG_MAX) {
                mte_free_tag_storage(tags);
        }
        xa_unlock(&mte_pages);

        xa_destroy(&mte_pages);
}

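/*
 * Walk every online page in every populated zone and save the tags of those
 * mapped as tagged. Called on the suspend path, before swsusp_save() takes
 * the snapshot.
 */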
static int swsusp_mte_save_tags(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        int ret = 0;
        int n = 0;

        if (!system_supports_mte())
                return 0;

        for_each_populated_zone(zone) {
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
                        struct page *page = pfn_to_online_page(pfn);

                        if (!page)
                                continue;

                        if (!page_mte_tagged(page))
                                continue;

                        ret = save_tags(page, pfn);
                        if (ret) {
                                swsusp_mte_free_storage();
                                goto out;
                        }

                        n++;
                }
        }
        pr_info("Saved %d MTE pages\n", n);

out:
        return ret;
}

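/*
 * Write the saved tags back to their pages and free the temporary storage.
 * Runs in the restored image, after the hibernate core has copied the page
 * contents back into place.
 */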
static void swsusp_mte_restore_tags(void)
{
        XA_STATE(xa_state, &mte_pages, 0);
        int n = 0;
        void *tags;

        xa_lock(&mte_pages);
        xas_for_each(&xa_state, tags, ULONG_MAX) {
                unsigned long pfn = xa_state.xa_index;
                struct page *page = pfn_to_online_page(pfn);

                mte_restore_page_tags(page_address(page), tags);

                mte_free_tag_storage(tags);
                n++;
        }
        xa_unlock(&mte_pages);

        pr_info("Restored %d MTE pages\n", n);

        xa_destroy(&mte_pages);
}

#else   /* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
        return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif  /* CONFIG_ARM64_MTE */

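/*
 * Take the hibernate snapshot. Note that this function returns twice: once
 * in the hibernating kernel, after swsusp_save() has copied memory, and once
 * in the restored kernel, via _cpu_resume(), where the 'else' branch below
 * cleans up and clears in_suspend.
 */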
int swsusp_arch_suspend(void)
{
        int ret = 0;
        unsigned long flags;
        struct sleep_stack_data state;

        if (cpus_are_stuck_in_kernel()) {
                pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
                return -EBUSY;
        }

        flags = local_daif_save();

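        /*
         * __cpu_suspend_enter() returns a non-zero value on the initial
         * pass, and zero when execution comes back here via _cpu_resume()
         * after the image has been restored.
         */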
        if (__cpu_suspend_enter(&state)) {
                /* make the crash dump kernel image visible/saveable */
                crash_prepare_suspend();

                ret = swsusp_mte_save_tags();
                if (ret)
                        return ret;

                sleep_cpu = smp_processor_id();
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC */
                dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
                                       (unsigned long)__mmuoff_data_end);
                dcache_clean_inval_poc((unsigned long)__idmap_text_start,
                                       (unsigned long)__idmap_text_end);

                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed()) {
                        dcache_clean_inval_poc(
                                (unsigned long)__hyp_idmap_text_start,
                                (unsigned long)__hyp_idmap_text_end);
                        dcache_clean_inval_poc((unsigned long)__hyp_text_start,
                                               (unsigned long)__hyp_text_end);
                }

                swsusp_mte_restore_tags();

                /* make the crash dump kernel image protected again */
                crash_post_resume();

                /*
                 * Tell the hibernation core that we've just restored
                 * the memory
                 */
                in_suspend = 0;

                sleep_cpu = -EINVAL;
                __cpu_suspend_exit();

                /*
                 * Just in case the boot kernel did turn the SSBD
                 * mitigation off behind our back, let's set the state
                 * to what we expect it to be.
                 */
                spectre_v4_enable_mitigation(NULL);
        }

        local_daif_restore(flags);

        return ret;
}

/*
 * Set up, then resume from the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * code; we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
        int rc;
        void *zero_page;
        size_t exit_size;
        pgd_t *tmp_pg_dir;
        phys_addr_t el2_vectors;
        void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                          void *, phys_addr_t, phys_addr_t);
        struct trans_pgd_info trans_info = {
                .trans_alloc_page       = hibernate_page_alloc,
                .trans_alloc_arg        = (void *)GFP_ATOMIC,
        };

        /*
         * Restoring the memory image will overwrite the ttbr1 page tables.
         * Create a second copy of just the linear map, and use this when
         * restoring.
         */
        rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
                                   PAGE_END);
        if (rc)
                return rc;

        /*
         * We need a zero page that is zero before & after resume in order
         * to break-before-make on the ttbr1 page tables.
         */
        zero_page = (void *)get_safe_page(GFP_ATOMIC);
        if (!zero_page) {
                pr_err("Failed to allocate zero page.\n");
                return -ENOMEM;
        }

        if (el2_reset_needed()) {
                rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
                if (rc) {
                        pr_err("Failed to setup el2 vectors\n");
                        return rc;
                }
        }

        exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
        /*
         * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
         * a new set of ttbr0 page tables and load them.
         */
        rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
                                   (phys_addr_t *)&hibernate_exit);
        if (rc) {
                pr_err("Failed to create safe executable page for hibernate_exit code.\n");
                return rc;
        }

        /*
         * KASLR will cause the el2 vectors to be in a different location in
         * the resumed kernel. Load hibernate's temporary copy into el2.
         *
         * We can skip this step if we booted at EL1, or are running with VHE.
         */
        if (el2_reset_needed())
                __hyp_set_vectors(el2_vectors);

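        /*
         * hibernate_exit() does not return: it copies each page on
         * restore_pblist into place and then jumps into the restored kernel
         * via resume_hdr.reenter_kernel (_cpu_resume).
         */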
        hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                       resume_hdr.reenter_kernel, restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

        return 0;
}

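/*
 * We must resume on the CPU we hibernated on, as its context is the one
 * saved in the image; keep sleep_cpu online and freeze the rest.
 */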
int hibernate_resume_nonboot_cpu_disable(void)
{
        if (sleep_cpu < 0) {
                pr_err("Failing to resume from hibernate on an unknown CPU.\n");
                return -ENODEV;
        }

        return freeze_secondary_cpus(sleep_cpu);
}