arch/riscv/kernel/patch.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/patch.h>

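/* Payload handed to patch_text_cb() through stop_machine(). */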
struct patch_insn {
	void *addr;
	u32 insn;
	atomic_t cpu_count;
};

#ifdef CONFIG_MMU
/*
 * fix_to_virt() requires a constant fixmap index, not a value held in a
 * run-time variable (such as one passed in a register); otherwise its
 * BUILD_BUG_ON() fails with "idx >= __end_of_fixed_addresses".  Hence
 * '__always_inline' and 'const unsigned int fixmap' here.
 */
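/*
 * Map the page containing @addr at fixmap slot @fixmap and return the
 * address of @addr within that temporary mapping.
 */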
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;

	if (core_kernel_text(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}

static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

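/*
 * Write @len bytes of @insn at @addr through a temporary fixmap alias, so
 * that text mapped read-only (e.g. under STRICT_KERNEL_RWX) can still be
 * patched.  A write may straddle a page boundary, hence the second fixmap
 * slot for the following page.
 */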
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	int ret;

	/*
	 * Callers are expected to hold text_mutex before getting here, so
	 * no additional locking is needed; the mutex is what keeps
	 * concurrent patching by different cores safe.
	 */
	lockdep_assert_held(&text_mutex);

	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
#else
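/* Without an MMU there is no fixmap; kernel text can be written in place. */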
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */

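/*
 * Write new instruction bytes at @addr and flush the icache.  "nosync"
 * means no stop_machine() rendezvous is performed here: the caller must
 * ensure no other hart can be executing the code while it is rewritten.
 */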
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = patch_insn_write(tp, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);

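/*
 * stop_machine() callback.  Every online CPU enters here; the first one
 * to bump cpu_count performs the patch, while the others spin until the
 * patching CPU's extra increment pushes cpu_count past num_online_cpus(),
 * signalling that the new instruction is in place.
 */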
static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == 1) {
		ret = patch_text_nosync(patch->addr, &patch->insn,
					GET_INSN_LENGTH(patch->insn));
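		/* The second increment releases the CPUs spinning below. */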
		atomic_inc(&patch->cpu_count);
	} else {
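		/*
		 * Every CPU increments cpu_count once on entry, so the
		 * count exceeds num_online_cpus() only after the patching
		 * CPU's extra increment above.
		 */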
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

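/*
 * Patch a single instruction (2 or 4 bytes, per GET_INSN_LENGTH()) while
 * all other CPUs are parked in stop_machine(), so no hart can execute the
 * instruction mid-update.  Callers must hold text_mutex.
 */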
int patch_text(void *addr, u32 insn)
{
	struct patch_insn patch = {
		.addr = addr,
		.insn = insn,
		.cpu_count = ATOMIC_INIT(0),
	};

	return stop_machine_cpuslocked(patch_text_cb,
				       &patch, cpu_online_mask);
}
NOKPROBE_SYMBOL(patch_text);
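
/*
 * Minimal usage sketch (illustrative, not part of this file).  A caller
 * arming a breakpoint, for instance, holds text_mutex around the patch:
 *
 *	mutex_lock(&text_mutex);
 *	ret = patch_text(addr, __BUG_INSN_32);
 *	mutex_unlock(&text_mutex);
 *
 * where __BUG_INSN_32 is the 32-bit ebreak encoding from <asm/bug.h>.
 */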