arch/powerpc/kernel/hw_breakpoint_constraints.c
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <asm/hw_breakpoint.h>
#include <asm/sstep.h>
#include <asm/cache.h>

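/*
 * Check whether the faulting address (DAR) falls within the exact
 * byte range the user asked to watch.
 */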
static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
        return ((info->address <= dar) && (dar - info->address < info->len));
}

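/*
 * Check whether the accessed range [ea, ea + size) overlaps the
 * user-requested watch range.
 */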
static bool ea_user_range_overlaps(unsigned long ea, int size,
                                   struct arch_hw_breakpoint *info)
{
        return ((ea < info->address + info->len) &&
                (ea + size > info->address));
}

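/*
 * Check whether the DAR falls within the range the hardware actually
 * monitors, i.e. the user range widened out to HW_BREAKPOINT_SIZE
 * alignment.
 */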
static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
        unsigned long hw_start_addr, hw_end_addr;

        hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
        hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

        return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}

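/*
 * Check whether the accessed range [ea, ea + size) overlaps the
 * hardware-monitored (aligned) range.
 */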
static bool ea_hw_range_overlaps(unsigned long ea, int size,
                                 struct arch_hw_breakpoint *info)
{
        unsigned long hw_start_addr, hw_end_addr;
        unsigned long align_size = HW_BREAKPOINT_SIZE;

        /*
         * On p10 predecessors, quadword accesses are handled differently
         * from other instructions.
         */
        if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
                align_size = HW_BREAKPOINT_SIZE_QUADWORD;

        hw_start_addr = ALIGN_DOWN(info->address, align_size);
        hw_end_addr = ALIGN(info->address + info->len, align_size);

        return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
}

/*
 * If hw has multiple DAWR registers, we also need to check all
 * dawrx constraint bits to confirm this is _really_ a valid event.
 * If type is UNKNOWN, but the privilege level matches, consider it
 * a positive match.
 */
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
                                    struct arch_hw_breakpoint *info)
{
        if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
                return false;

        /*
         * Cache Management instructions other than dcbz never cause a
         * match, i.e. if type is CACHEOP here, the instruction must be
         * dcbz, and dcbz is treated as a store.
         */
        if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
                return false;

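        /* The privilege level of the access must also match. */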
        if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
                return false;

        if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
                return false;

        return true;
}

/*
 * Return true if the event is valid wrt the dawr configuration,
 * including an extraneous exception (which is flagged as
 * HW_BRK_TYPE_EXTRANEOUS_IRQ in info->type). Otherwise return false.
 */
bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
                          unsigned long ea, int type, int size,
                          struct arch_hw_breakpoint *info)
{
        bool in_user_range = dar_in_user_range(regs->dar, info);
        bool dawrx_constraints;

        /*
         * 8xx supports only one breakpoint and thus we can
         * unconditionally return true.
         */
        if (IS_ENABLED(CONFIG_PPC_8xx)) {
                if (!in_user_range)
                        info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
                return true;
        }

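        /*
         * An all-zero instr means the instruction could not be fetched
         * (see wp_get_instr_detail()); the DAR is then the only
         * information available to validate the hit.
         */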
        if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
                if (cpu_has_feature(CPU_FTR_ARCH_31) &&
                    !dar_in_hw_range(regs->dar, info))
                        return false;

                return true;
        }

        dawrx_constraints = check_dawrx_constraints(regs, type, info);

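        /*
         * If analyse_instr() could not classify the access, there is no
         * reliable ea/size, so fall back to the DAR and the dawrx bits.
         */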
        if (type == UNKNOWN) {
                if (cpu_has_feature(CPU_FTR_ARCH_31) &&
                    !dar_in_hw_range(regs->dar, info))
                        return false;

                return dawrx_constraints;
        }

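        /*
         * The access overlaps the exact user range: report a hit if the
         * dawrx constraints also match.
         */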
        if (ea_user_range_overlaps(ea, size, info))
                return dawrx_constraints;

        if (ea_hw_range_overlaps(ea, size, info)) {
                if (dawrx_constraints) {
                        info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
                        return true;
                }
        }
        return false;
}

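/* Size of the cache block a cache management instruction operates on. */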
static int cache_op_size(void)
{
#ifdef __powerpc64__
        return ppc64_caches.l1d.block_size;
#else
        return L1_CACHE_BYTES;
#endif
}

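/*
 * Decode the instruction at regs->nip to find the type, size and
 * effective address of the access that may have hit the watchpoint.
 */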
void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
                         int *type, int *size, unsigned long *ea)
{
        struct instruction_op op;

        if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
                return;

        analyse_instr(&op, regs, *instr);
        *type = GETTYPE(op.type);
        *ea = op.ea;
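        /* In 32-bit mode the effective address is truncated to 32 bits. */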
#ifdef __powerpc64__
        if (!(regs->msr & MSR_64BIT))
                *ea &= 0xffffffffUL;
#endif

        *size = GETSIZE(op.type);
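        /*
         * Cache management instructions operate on a whole cache block,
         * and VMX accesses are naturally aligned, so round the EA down
         * accordingly.
         */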
        if (*type == CACHEOP) {
                *size = cache_op_size();
                *ea &= ~(*size - 1);
        } else if (*type == LOAD_VMX || *type == STORE_VMX) {
                *ea &= ~(*size - 1);
        }
}
162 }