// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/trans_pgd.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_nvhe())

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
        char            uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
        struct arch_hibernate_hdr_invariants invariants;

        /* These are needed to find the relocated kernel if built with kaslr */
        phys_addr_t     ttbr1_el1;
        void            (*reenter_kernel)(void);

        /*
         * We need to know where the __hyp_stub_vectors are after restore to
         * re-configure el2.
         */
        phys_addr_t     __hyp_stub_vectors;

        u64             sleep_cpu_mpidr;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
        memset(i, 0, sizeof(*i));
        memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

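/*
 * Tell the hibernate core which pfns must not be saved in the image: the
 * kernel's __nosave section, plus whatever crash_is_nosave() excludes from
 * the crash kernel's reserved region.
 */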
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
        unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

        return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
                crash_is_nosave(pfn);
}

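/*
 * All CPU context is saved by __cpu_suspend_enter() and restored by
 * cpu_resume(), so these hooks have nothing to do beyond checking that
 * the nonboot CPUs have been taken offline.
 */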
void notrace save_processor_state(void)
{
        WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

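/*
 * Called by the hibernate core when the image is written: record everything
 * the resume kernel will need in order to find and re-enter this kernel.
 */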
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct arch_hibernate_hdr *hdr = addr;

        if (max_size < sizeof(*hdr))
                return -EOVERFLOW;

        arch_hdr_invariants(&hdr->invariants);
        hdr->ttbr1_el1          = __pa_symbol(swapper_pg_dir);
        hdr->reenter_kernel     = _cpu_resume;

        /* We can't use __hyp_get_vectors() because kvm may still be loaded */
        if (el2_reset_needed())
                hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
        else
                hdr->__hyp_stub_vectors = 0;

        /* Save the mpidr of the cpu we called cpu_suspend() on... */
        if (sleep_cpu < 0) {
                pr_err("Failing to hibernate on an unknown CPU.\n");
                return -ENODEV;
        }
        hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
        pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
                hdr->sleep_cpu_mpidr);

        return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

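/*
 * Called by the boot kernel when it reads the image header: reject images
 * written by a different kernel build, then bring up the CPU we hibernated
 * on so that resume runs there.
 */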
int arch_hibernation_header_restore(void *addr)
{
        int ret;
        struct arch_hibernate_hdr_invariants invariants;
        struct arch_hibernate_hdr *hdr = addr;

        arch_hdr_invariants(&invariants);
        if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
                pr_crit("Hibernate image not generated by this kernel!\n");
                return -EINVAL;
        }

        sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
        pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
                hdr->sleep_cpu_mpidr);
        if (sleep_cpu < 0) {
                pr_crit("Hibernated on a CPU not known to this kernel!\n");
                sleep_cpu = -EINVAL;
                return -EINVAL;
        }

        ret = bringup_hibernate_cpu(sleep_cpu);
        if (ret) {
                sleep_cpu = -EINVAL;
                return ret;
        }

        resume_hdr = *hdr;

        return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

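/*
 * Page allocator callback for trans_pgd: the temporary page tables must be
 * allocated from the safe-page pool so that restoring the image cannot
 * overwrite them.
 */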
static void *hibernate_page_alloc(void *arg)
{
        return (void *)get_safe_page((__force gfp_t)(unsigned long)arg);
}

/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps the page at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
                                 phys_addr_t *phys_dst_addr)
{
        struct trans_pgd_info trans_info = {
                .trans_alloc_page       = hibernate_page_alloc,
                .trans_alloc_arg        = (__force void *)GFP_ATOMIC,
        };

        void *page = (void *)get_safe_page(GFP_ATOMIC);
        phys_addr_t trans_ttbr0;
        unsigned long t0sz;
        int rc;

        if (!page)
                return -ENOMEM;

        memcpy(page, src_start, length);
        caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
        rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
        if (rc)
                return rc;

        cpu_install_ttbr0(trans_ttbr0, t0sz);
        *phys_dst_addr = virt_to_phys(page);

        return 0;
}

#ifdef CONFIG_ARM64_MTE

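/* pfn -> MTE tag storage, filled just before the memory image is written. */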
static DEFINE_XARRAY(mte_pages);

static int save_tags(struct page *page, unsigned long pfn)
{
        void *tag_storage, *ret;

        tag_storage = mte_allocate_tag_storage();
        if (!tag_storage)
                return -ENOMEM;

        mte_save_page_tags(page_address(page), tag_storage);

        ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
        if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
                mte_free_tag_storage(tag_storage);
                return xa_err(ret);
        } else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
                mte_free_tag_storage(ret);
        }

        return 0;
}

static void swsusp_mte_free_storage(void)
{
        XA_STATE(xa_state, &mte_pages, 0);
        void *tags;

        xa_lock(&mte_pages);
        xas_for_each(&xa_state, tags, ULONG_MAX) {
                mte_free_tag_storage(tags);
        }
        xa_unlock(&mte_pages);

        xa_destroy(&mte_pages);
}

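/*
 * Runs in the suspending kernel just before swsusp_save() snapshots memory:
 * walk every online page and stash the tags of each MTE-tagged page.
 */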
static int swsusp_mte_save_tags(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        int ret = 0;
        int n = 0;

        if (!system_supports_mte())
                return 0;

        for_each_populated_zone(zone) {
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
                        struct page *page = pfn_to_online_page(pfn);

                        if (!page)
                                continue;

                        if (!test_bit(PG_mte_tagged, &page->flags))
                                continue;

                        ret = save_tags(page, pfn);
                        if (ret) {
                                swsusp_mte_free_storage();
                                goto out;
                        }

                        n++;
                }
        }
        pr_info("Saved %d MTE pages\n", n);

out:
        return ret;
}

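/*
 * Runs in the resumed kernel once the image has been copied back over
 * memory, to put the saved tags back on their pages.
 */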
static void swsusp_mte_restore_tags(void)
{
        XA_STATE(xa_state, &mte_pages, 0);
        int n = 0;
        void *tags;

        xa_lock(&mte_pages);
        xas_for_each(&xa_state, tags, ULONG_MAX) {
                unsigned long pfn = xa_state.xa_index;
                struct page *page = pfn_to_online_page(pfn);

                mte_restore_page_tags(page_address(page), tags);

                mte_free_tag_storage(tags);
                n++;
        }
        xa_unlock(&mte_pages);

        pr_info("Restored %d MTE pages\n", n);

        xa_destroy(&mte_pages);
}

#else   /* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
        return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif  /* CONFIG_ARM64_MTE */

int swsusp_arch_suspend(void)
{
        int ret = 0;
        unsigned long flags;
        struct sleep_stack_data state;

        if (cpus_are_stuck_in_kernel()) {
                pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
                return -EBUSY;
        }

        flags = local_daif_save();

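        /*
         * Like setjmp(): __cpu_suspend_enter() returns non-zero on the
         * initial call, when the CPU context has just been saved, and zero
         * when this point is reached again via cpu_resume() in the resumed
         * image.
         */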
        if (__cpu_suspend_enter(&state)) {
                /* make the crash dump kernel image visible/saveable */
                crash_prepare_suspend();

                ret = swsusp_mte_save_tags();
                if (ret)
                        return ret;

                sleep_cpu = smp_processor_id();
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC */
                dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
                                    (unsigned long)__mmuoff_data_end);
                dcache_clean_inval_poc((unsigned long)__idmap_text_start,
                                    (unsigned long)__idmap_text_end);

                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed()) {
                        dcache_clean_inval_poc(
                                (unsigned long)__hyp_idmap_text_start,
                                (unsigned long)__hyp_idmap_text_end);
                        dcache_clean_inval_poc((unsigned long)__hyp_text_start,
                                            (unsigned long)__hyp_text_end);
                }

                swsusp_mte_restore_tags();

                /* make the crash dump kernel image protected again */
                crash_post_resume();

                /*
                 * Tell the hibernation core that we've just restored
                 * the memory
                 */
                in_suspend = 0;

                sleep_cpu = -EINVAL;
                __cpu_suspend_exit();

                /*
                 * Just in case the boot kernel did turn the SSBD
                 * mitigation off behind our back, let's set the state
                 * to what we expect it to be.
                 */
                spectre_v4_enable_mitigation(NULL);
        }

        local_daif_restore(flags);

        return ret;
}

/*
 * Set up, then resume from the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * code; we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
        int rc;
        void *zero_page;
        size_t exit_size;
        pgd_t *tmp_pg_dir;
        phys_addr_t el2_vectors;
        void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                          void *, phys_addr_t, phys_addr_t);
        struct trans_pgd_info trans_info = {
                .trans_alloc_page       = hibernate_page_alloc,
                .trans_alloc_arg        = (void *)GFP_ATOMIC,
        };

        /*
         * Restoring the memory image will overwrite the ttbr1 page tables.
         * Create a second copy of just the linear map, and use this when
         * restoring.
         */
        rc = trans_pgd_create_copy(&trans_info, &tmp_pg_dir, PAGE_OFFSET,
                                   PAGE_END);
        if (rc)
                return rc;

        /*
         * We need a zero page that is zero before & after resume in order
         * to break before make on the ttbr1 page tables.
         */
        zero_page = (void *)get_safe_page(GFP_ATOMIC);
        if (!zero_page) {
                pr_err("Failed to allocate zero page.\n");
                return -ENOMEM;
        }

        if (el2_reset_needed()) {
                rc = trans_pgd_copy_el2_vectors(&trans_info, &el2_vectors);
                if (rc) {
                        pr_err("Failed to setup el2 vectors\n");
                        return rc;
                }
        }

        exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
        /*
         * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
         * a new set of ttbr0 page tables and load them.
         */
        rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
                                   (phys_addr_t *)&hibernate_exit);
        if (rc) {
                pr_err("Failed to create safe executable page for hibernate_exit code.\n");
                return rc;
        }

        /*
         * KASLR will cause the el2 vectors to be in a different location in
         * the resumed kernel. Load hibernate's temporary copy into el2.
         *
         * We can skip this step if we booted at EL1, or are running with VHE.
         */
        if (el2_reset_needed())
                __hyp_set_vectors(el2_vectors);

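        /*
         * hibernate_exit() does not return: running from the safe executable
         * page, it switches ttbr1 to tmp_pg_dir, copies the restore_pblist
         * pages back into place, then re-enters the restored kernel via
         * resume_hdr.reenter_kernel.
         */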
        hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                       resume_hdr.reenter_kernel, restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

        return 0;
}

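/*
 * Called by the hibernate core during resume in place of the generic
 * nonboot-CPU disable path, so that every CPU except the one we hibernated
 * on is taken offline and resume runs there.
 */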
int hibernate_resume_nonboot_cpu_disable(void)
{
        if (sleep_cpu < 0) {
                pr_err("Failing to resume from hibernate on an unknown CPU.\n");
                return -ENODEV;
        }

        return freeze_secondary_cpus(sleep_cpu);
}