arch/arm64/kernel/insn.c  (linux-2.6-microblaze.git)
At commit: maccess: rename probe_kernel_{read,write} to copy_{from,to}_kernel_nofault
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Huawei Ltd.
4  * Author: Jiang Liu <liuj97@gmail.com>
5  *
6  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
7  */
8 #include <linux/bitops.h>
9 #include <linux/bug.h>
10 #include <linux/compiler.h>
11 #include <linux/kernel.h>
12 #include <linux/mm.h>
13 #include <linux/smp.h>
14 #include <linux/spinlock.h>
15 #include <linux/stop_machine.h>
16 #include <linux/types.h>
17 #include <linux/uaccess.h>
18
19 #include <asm/cacheflush.h>
20 #include <asm/debug-monitors.h>
21 #include <asm/fixmap.h>
22 #include <asm/insn.h>
23 #include <asm/kprobes.h>
24 #include <asm/sections.h>
25
26 #define AARCH64_INSN_SF_BIT     BIT(31)
27 #define AARCH64_INSN_N_BIT      BIT(22)
28 #define AARCH64_INSN_LSL_12     BIT(22)
29
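/*
 * Top-level decode table for A64: indexed by bits [28:25] (op0) of the
 * instruction, as extracted in aarch64_get_insn_class() below.
 */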
30 static const int aarch64_insn_encoding_class[] = {
31         AARCH64_INSN_CLS_UNKNOWN,
32         AARCH64_INSN_CLS_UNKNOWN,
33         AARCH64_INSN_CLS_UNKNOWN,
34         AARCH64_INSN_CLS_UNKNOWN,
35         AARCH64_INSN_CLS_LDST,
36         AARCH64_INSN_CLS_DP_REG,
37         AARCH64_INSN_CLS_LDST,
38         AARCH64_INSN_CLS_DP_FPSIMD,
39         AARCH64_INSN_CLS_DP_IMM,
40         AARCH64_INSN_CLS_DP_IMM,
41         AARCH64_INSN_CLS_BR_SYS,
42         AARCH64_INSN_CLS_BR_SYS,
43         AARCH64_INSN_CLS_LDST,
44         AARCH64_INSN_CLS_DP_REG,
45         AARCH64_INSN_CLS_LDST,
46         AARCH64_INSN_CLS_DP_FPSIMD,
47 };
48
49 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
50 {
51         return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
52 }
53
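/*
 * A HINT instruction is "steppable" if it can safely be single-stepped out of
 * line (e.g. by kprobes): NOP and the PAC/AUT/XPAC and BTI hints listed below
 * have no behaviour that depends on the instruction's original address.
 */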
54 bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
55 {
56         if (!aarch64_insn_is_hint(insn))
57                 return false;
58
59         switch (insn & 0xFE0) {
60         case AARCH64_INSN_HINT_XPACLRI:
61         case AARCH64_INSN_HINT_PACIA_1716:
62         case AARCH64_INSN_HINT_PACIB_1716:
63         case AARCH64_INSN_HINT_AUTIA_1716:
64         case AARCH64_INSN_HINT_AUTIB_1716:
65         case AARCH64_INSN_HINT_PACIAZ:
66         case AARCH64_INSN_HINT_PACIASP:
67         case AARCH64_INSN_HINT_PACIBZ:
68         case AARCH64_INSN_HINT_PACIBSP:
69         case AARCH64_INSN_HINT_AUTIAZ:
70         case AARCH64_INSN_HINT_AUTIASP:
71         case AARCH64_INSN_HINT_AUTIBZ:
72         case AARCH64_INSN_HINT_AUTIBSP:
73         case AARCH64_INSN_HINT_BTI:
74         case AARCH64_INSN_HINT_BTIC:
75         case AARCH64_INSN_HINT_BTIJ:
76         case AARCH64_INSN_HINT_BTIJC:
77         case AARCH64_INSN_HINT_NOP:
78                 return true;
79         default:
80                 return false;
81         }
82 }
83
84 bool aarch64_insn_is_branch_imm(u32 insn)
85 {
86         return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
87                 aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
88                 aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
89                 aarch64_insn_is_bcond(insn));
90 }
91
92 static DEFINE_RAW_SPINLOCK(patch_lock);
93
94 static bool is_exit_text(unsigned long addr)
95 {
96         /* discarded with init text/data */
97         return system_state < SYSTEM_RUNNING &&
98                 addr >= (unsigned long)__exittext_begin &&
99                 addr < (unsigned long)__exittext_end;
100 }
101
102 static bool is_image_text(unsigned long addr)
103 {
104         return core_kernel_text(addr) || is_exit_text(addr);
105 }
106
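/*
 * Kernel and module text is normally mapped read-only. patch_map() makes the
 * page containing @addr temporarily writable by aliasing it at the given
 * text-poke fixmap slot; patch_unmap() removes that alias again. Users are
 * serialised by patch_lock.
 */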
107 static void __kprobes *patch_map(void *addr, int fixmap)
108 {
109         unsigned long uintaddr = (uintptr_t) addr;
110         bool image = is_image_text(uintaddr);
111         struct page *page;
112
113         if (image)
114                 page = phys_to_page(__pa_symbol(addr));
115         else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
116                 page = vmalloc_to_page(addr);
117         else
118                 return addr;
119
120         BUG_ON(!page);
121         return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
122                         (uintaddr & ~PAGE_MASK));
123 }
124
125 static void __kprobes patch_unmap(int fixmap)
126 {
127         clear_fixmap(fixmap);
128 }
129 /*
130  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
131  * little-endian.
132  */
133 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
134 {
135         int ret;
136         __le32 val;
137
138         ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
139         if (!ret)
140                 *insnp = le32_to_cpu(val);
141
142         return ret;
143 }
144
145 static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
146 {
147         void *waddr = addr;
148         unsigned long flags = 0;
149         int ret;
150
151         raw_spin_lock_irqsave(&patch_lock, flags);
152         waddr = patch_map(addr, FIX_TEXT_POKE0);
153
154         ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
155
156         patch_unmap(FIX_TEXT_POKE0);
157         raw_spin_unlock_irqrestore(&patch_lock, flags);
158
159         return ret;
160 }
161
162 int __kprobes aarch64_insn_write(void *addr, u32 insn)
163 {
164         return __aarch64_insn_write(addr, cpu_to_le32(insn));
165 }
166
167 bool __kprobes aarch64_insn_uses_literal(u32 insn)
168 {
169         /* ldr/ldrsw (literal), adr/adrp, prfm (literal) */
170
171         return aarch64_insn_is_ldr_lit(insn) ||
172                 aarch64_insn_is_ldrsw_lit(insn) ||
173                 aarch64_insn_is_adr_adrp(insn) ||
174                 aarch64_insn_is_prfm_lit(insn);
175 }
176
177 bool __kprobes aarch64_insn_is_branch(u32 insn)
178 {
179         /* b, bl, cb*, tb*, b.cond, br, blr, ret */
180
181         return aarch64_insn_is_b(insn) ||
182                 aarch64_insn_is_bl(insn) ||
183                 aarch64_insn_is_cbz(insn) ||
184                 aarch64_insn_is_cbnz(insn) ||
185                 aarch64_insn_is_tbz(insn) ||
186                 aarch64_insn_is_tbnz(insn) ||
187                 aarch64_insn_is_ret(insn) ||
188                 aarch64_insn_is_br(insn) ||
189                 aarch64_insn_is_blr(insn) ||
190                 aarch64_insn_is_bcond(insn);
191 }
192
193 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
194 {
195         u32 *tp = addr;
196         int ret;
197
198         /* A64 instructions must be word aligned */
199         if ((uintptr_t)tp & 0x3)
200                 return -EINVAL;
201
202         ret = aarch64_insn_write(tp, insn);
203         if (ret == 0)
204                 __flush_icache_range((uintptr_t)tp,
205                                      (uintptr_t)tp + AARCH64_INSN_SIZE);
206
207         return ret;
208 }
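
/*
 * Illustrative use (not part of this file): redirecting a call site could be
 * done by generating a new BL and patching it in, e.g.:
 *
 *	u32 new = aarch64_insn_gen_branch_imm(callsite, target,
 *					      AARCH64_INSN_BRANCH_LINK);
 *	if (new != AARCH64_BREAK_FAULT)
 *		aarch64_insn_patch_text_nosync((void *)callsite, new);
 *
 * (callsite and target are hypothetical addresses.) The _nosync variant
 * relies on the architecture's rules for concurrent modification, or on no
 * other CPU executing the patched instruction; aarch64_insn_patch_text()
 * below instead synchronises all CPUs via stop_machine().
 */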
209
210 struct aarch64_insn_patch {
211         void            **text_addrs;
212         u32             *new_insns;
213         int             insn_cnt;
214         atomic_t        cpu_count;
215 };
216
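/*
 * stop_machine() callback: every online CPU ends up here. The first CPU to
 * bump cpu_count does the patching, then increments cpu_count once more so
 * that it exceeds num_online_cpus(); the other CPUs spin until that happens
 * and then execute an ISB to discard any stale copy of the old instructions.
 */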
217 static int __kprobes aarch64_insn_patch_text_cb(void *arg)
218 {
219         int i, ret = 0;
220         struct aarch64_insn_patch *pp = arg;
221
222         /* The first CPU becomes master */
223         if (atomic_inc_return(&pp->cpu_count) == 1) {
224                 for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
225                         ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
226                                                              pp->new_insns[i]);
227                 /* Notify other processors with an additional increment. */
228                 atomic_inc(&pp->cpu_count);
229         } else {
230                 while (atomic_read(&pp->cpu_count) <= num_online_cpus())
231                         cpu_relax();
232                 isb();
233         }
234
235         return ret;
236 }
237
238 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
239 {
240         struct aarch64_insn_patch patch = {
241                 .text_addrs = addrs,
242                 .new_insns = insns,
243                 .insn_cnt = cnt,
244                 .cpu_count = ATOMIC_INIT(0),
245         };
246
247         if (cnt <= 0)
248                 return -EINVAL;
249
250         return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
251                                        cpu_online_mask);
252 }
253
254 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
255                                                 u32 *maskp, int *shiftp)
256 {
257         u32 mask;
258         int shift;
259
260         switch (type) {
261         case AARCH64_INSN_IMM_26:
262                 mask = BIT(26) - 1;
263                 shift = 0;
264                 break;
265         case AARCH64_INSN_IMM_19:
266                 mask = BIT(19) - 1;
267                 shift = 5;
268                 break;
269         case AARCH64_INSN_IMM_16:
270                 mask = BIT(16) - 1;
271                 shift = 5;
272                 break;
273         case AARCH64_INSN_IMM_14:
274                 mask = BIT(14) - 1;
275                 shift = 5;
276                 break;
277         case AARCH64_INSN_IMM_12:
278                 mask = BIT(12) - 1;
279                 shift = 10;
280                 break;
281         case AARCH64_INSN_IMM_9:
282                 mask = BIT(9) - 1;
283                 shift = 12;
284                 break;
285         case AARCH64_INSN_IMM_7:
286                 mask = BIT(7) - 1;
287                 shift = 15;
288                 break;
289         case AARCH64_INSN_IMM_6:
290         case AARCH64_INSN_IMM_S:
291                 mask = BIT(6) - 1;
292                 shift = 10;
293                 break;
294         case AARCH64_INSN_IMM_R:
295                 mask = BIT(6) - 1;
296                 shift = 16;
297                 break;
298         case AARCH64_INSN_IMM_N:
299                 mask = 1;
300                 shift = 22;
301                 break;
302         default:
303                 return -EINVAL;
304         }
305
306         *maskp = mask;
307         *shiftp = shift;
308
309         return 0;
310 }
311
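/*
 * ADR/ADRP split their 21-bit signed immediate into immlo (2 bits at [30:29])
 * and immhi (19 bits at [23:5]); the macros below reassemble and split it.
 */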
312 #define ADR_IMM_HILOSPLIT       2
313 #define ADR_IMM_SIZE            SZ_2M
314 #define ADR_IMM_LOMASK          ((1 << ADR_IMM_HILOSPLIT) - 1)
315 #define ADR_IMM_HIMASK          ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
316 #define ADR_IMM_LOSHIFT         29
317 #define ADR_IMM_HISHIFT         5
318
319 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
320 {
321         u32 immlo, immhi, mask;
322         int shift;
323
324         switch (type) {
325         case AARCH64_INSN_IMM_ADR:
326                 shift = 0;
327                 immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
328                 immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
329                 insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
330                 mask = ADR_IMM_SIZE - 1;
331                 break;
332         default:
333                 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
334                         pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
335                                type);
336                         return 0;
337                 }
338         }
339
340         return (insn >> shift) & mask;
341 }
342
343 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
344                                   u32 insn, u64 imm)
345 {
346         u32 immlo, immhi, mask;
347         int shift;
348
349         if (insn == AARCH64_BREAK_FAULT)
350                 return AARCH64_BREAK_FAULT;
351
352         switch (type) {
353         case AARCH64_INSN_IMM_ADR:
354                 shift = 0;
355                 immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
356                 imm >>= ADR_IMM_HILOSPLIT;
357                 immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
358                 imm = immlo | immhi;
359                 mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
360                         (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
361                 break;
362         default:
363                 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
364                         pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
365                                type);
366                         return AARCH64_BREAK_FAULT;
367                 }
368         }
369
370         /* Update the immediate field. */
371         insn &= ~(mask << shift);
372         insn |= (imm & mask) << shift;
373
374         return insn;
375 }
376
377 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
378                                         u32 insn)
379 {
380         int shift;
381
382         switch (type) {
383         case AARCH64_INSN_REGTYPE_RT:
384         case AARCH64_INSN_REGTYPE_RD:
385                 shift = 0;
386                 break;
387         case AARCH64_INSN_REGTYPE_RN:
388                 shift = 5;
389                 break;
390         case AARCH64_INSN_REGTYPE_RT2:
391         case AARCH64_INSN_REGTYPE_RA:
392                 shift = 10;
393                 break;
394         case AARCH64_INSN_REGTYPE_RM:
395                 shift = 16;
396                 break;
397         default:
398                 pr_err("%s: unknown register type encoding %d\n", __func__,
399                        type);
400                 return 0;
401         }
402
403         return (insn >> shift) & GENMASK(4, 0);
404 }
405
406 static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
407                                         u32 insn,
408                                         enum aarch64_insn_register reg)
409 {
410         int shift;
411
412         if (insn == AARCH64_BREAK_FAULT)
413                 return AARCH64_BREAK_FAULT;
414
415         if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
416                 pr_err("%s: unknown register encoding %d\n", __func__, reg);
417                 return AARCH64_BREAK_FAULT;
418         }
419
420         switch (type) {
421         case AARCH64_INSN_REGTYPE_RT:
422         case AARCH64_INSN_REGTYPE_RD:
423                 shift = 0;
424                 break;
425         case AARCH64_INSN_REGTYPE_RN:
426                 shift = 5;
427                 break;
428         case AARCH64_INSN_REGTYPE_RT2:
429         case AARCH64_INSN_REGTYPE_RA:
430                 shift = 10;
431                 break;
432         case AARCH64_INSN_REGTYPE_RM:
433         case AARCH64_INSN_REGTYPE_RS:
434                 shift = 16;
435                 break;
436         default:
437                 pr_err("%s: unknown register type encoding %d\n", __func__,
438                        type);
439                 return AARCH64_BREAK_FAULT;
440         }
441
442         insn &= ~(GENMASK(4, 0) << shift);
443         insn |= reg << shift;
444
445         return insn;
446 }
447
448 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
449                                          u32 insn)
450 {
451         u32 size;
452
453         switch (type) {
454         case AARCH64_INSN_SIZE_8:
455                 size = 0;
456                 break;
457         case AARCH64_INSN_SIZE_16:
458                 size = 1;
459                 break;
460         case AARCH64_INSN_SIZE_32:
461                 size = 2;
462                 break;
463         case AARCH64_INSN_SIZE_64:
464                 size = 3;
465                 break;
466         default:
467                 pr_err("%s: unknown size encoding %d\n", __func__, type);
468                 return AARCH64_BREAK_FAULT;
469         }
470
471         insn &= ~GENMASK(31, 30);
472         insn |= size << 30;
473
474         return insn;
475 }
476
477 static inline long branch_imm_common(unsigned long pc, unsigned long addr,
478                                      long range)
479 {
480         long offset;
481
482         if ((pc & 0x3) || (addr & 0x3)) {
483                 pr_err("%s: A64 instructions must be word aligned\n", __func__);
484                 return range;
485         }
486
487         offset = ((long)addr - (long)pc);
488
489         if (offset < -range || offset >= range) {
490                 pr_err("%s: offset out of range\n", __func__);
491                 return range;
492         }
493
494         return offset;
495 }
496
497 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
498                                           enum aarch64_insn_branch_type type)
499 {
500         u32 insn;
501         long offset;
502
503         /*
504          * B/BL support a [-128M, 128M) offset.
505          * The ARM64 virtual address arrangement guarantees that all kernel
506          * and module text is within +/-128M.
507          */
508         offset = branch_imm_common(pc, addr, SZ_128M);
509         if (offset >= SZ_128M)
510                 return AARCH64_BREAK_FAULT;
511
512         switch (type) {
513         case AARCH64_INSN_BRANCH_LINK:
514                 insn = aarch64_insn_get_bl_value();
515                 break;
516         case AARCH64_INSN_BRANCH_NOLINK:
517                 insn = aarch64_insn_get_b_value();
518                 break;
519         default:
520                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
521                 return AARCH64_BREAK_FAULT;
522         }
523
524         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
525                                              offset >> 2);
526 }
527
528 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
529                                      enum aarch64_insn_register reg,
530                                      enum aarch64_insn_variant variant,
531                                      enum aarch64_insn_branch_type type)
532 {
533         u32 insn;
534         long offset;
535
536         offset = branch_imm_common(pc, addr, SZ_1M);
537         if (offset >= SZ_1M)
538                 return AARCH64_BREAK_FAULT;
539
540         switch (type) {
541         case AARCH64_INSN_BRANCH_COMP_ZERO:
542                 insn = aarch64_insn_get_cbz_value();
543                 break;
544         case AARCH64_INSN_BRANCH_COMP_NONZERO:
545                 insn = aarch64_insn_get_cbnz_value();
546                 break;
547         default:
548                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
549                 return AARCH64_BREAK_FAULT;
550         }
551
552         switch (variant) {
553         case AARCH64_INSN_VARIANT_32BIT:
554                 break;
555         case AARCH64_INSN_VARIANT_64BIT:
556                 insn |= AARCH64_INSN_SF_BIT;
557                 break;
558         default:
559                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
560                 return AARCH64_BREAK_FAULT;
561         }
562
563         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
564
565         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
566                                              offset >> 2);
567 }
568
569 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
570                                      enum aarch64_insn_condition cond)
571 {
572         u32 insn;
573         long offset;
574
575         offset = branch_imm_common(pc, addr, SZ_1M);
576
577         insn = aarch64_insn_get_bcond_value();
578
579         if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
580                 pr_err("%s: unknown condition encoding %d\n", __func__, cond);
581                 return AARCH64_BREAK_FAULT;
582         }
583         insn |= cond;
584
585         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
586                                              offset >> 2);
587 }
588
589 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
590 {
591         return aarch64_insn_get_hint_value() | op;
592 }
593
594 u32 __kprobes aarch64_insn_gen_nop(void)
595 {
596         return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
597 }
598
599 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
600                                 enum aarch64_insn_branch_type type)
601 {
602         u32 insn;
603
604         switch (type) {
605         case AARCH64_INSN_BRANCH_NOLINK:
606                 insn = aarch64_insn_get_br_value();
607                 break;
608         case AARCH64_INSN_BRANCH_LINK:
609                 insn = aarch64_insn_get_blr_value();
610                 break;
611         case AARCH64_INSN_BRANCH_RETURN:
612                 insn = aarch64_insn_get_ret_value();
613                 break;
614         default:
615                 pr_err("%s: unknown branch encoding %d\n", __func__, type);
616                 return AARCH64_BREAK_FAULT;
617         }
618
619         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
620 }
621
622 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
623                                     enum aarch64_insn_register base,
624                                     enum aarch64_insn_register offset,
625                                     enum aarch64_insn_size_type size,
626                                     enum aarch64_insn_ldst_type type)
627 {
628         u32 insn;
629
630         switch (type) {
631         case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
632                 insn = aarch64_insn_get_ldr_reg_value();
633                 break;
634         case AARCH64_INSN_LDST_STORE_REG_OFFSET:
635                 insn = aarch64_insn_get_str_reg_value();
636                 break;
637         default:
638                 pr_err("%s: unknown load/store encoding %d\n", __func__, type);
639                 return AARCH64_BREAK_FAULT;
640         }
641
642         insn = aarch64_insn_encode_ldst_size(size, insn);
643
644         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
645
646         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
647                                             base);
648
649         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
650                                             offset);
651 }
652
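/*
 * Illustrative use (not part of this file): a frame push equivalent to
 * "stp x29, x30, [sp, #-16]!" could be generated as:
 *
 *	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_29,
 *						AARCH64_INSN_REG_30,
 *						AARCH64_INSN_REG_SP, -16,
 *						AARCH64_INSN_VARIANT_64BIT,
 *						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
 */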
653 u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
654                                      enum aarch64_insn_register reg2,
655                                      enum aarch64_insn_register base,
656                                      int offset,
657                                      enum aarch64_insn_variant variant,
658                                      enum aarch64_insn_ldst_type type)
659 {
660         u32 insn;
661         int shift;
662
663         switch (type) {
664         case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
665                 insn = aarch64_insn_get_ldp_pre_value();
666                 break;
667         case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
668                 insn = aarch64_insn_get_stp_pre_value();
669                 break;
670         case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
671                 insn = aarch64_insn_get_ldp_post_value();
672                 break;
673         case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
674                 insn = aarch64_insn_get_stp_post_value();
675                 break;
676         default:
677                 pr_err("%s: unknown load/store encoding %d\n", __func__, type);
678                 return AARCH64_BREAK_FAULT;
679         }
680
681         switch (variant) {
682         case AARCH64_INSN_VARIANT_32BIT:
683                 if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
684                         pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
685                                __func__, offset);
686                         return AARCH64_BREAK_FAULT;
687                 }
688                 shift = 2;
689                 break;
690         case AARCH64_INSN_VARIANT_64BIT:
691                 if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
692                         pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
693                                __func__, offset);
694                         return AARCH64_BREAK_FAULT;
695                 }
696                 shift = 3;
697                 insn |= AARCH64_INSN_SF_BIT;
698                 break;
699         default:
700                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
701                 return AARCH64_BREAK_FAULT;
702         }
703
704         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
705                                             reg1);
706
707         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
708                                             reg2);
709
710         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
711                                             base);
712
713         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
714                                              offset >> shift);
715 }
716
717 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
718                                    enum aarch64_insn_register base,
719                                    enum aarch64_insn_register state,
720                                    enum aarch64_insn_size_type size,
721                                    enum aarch64_insn_ldst_type type)
722 {
723         u32 insn;
724
725         switch (type) {
726         case AARCH64_INSN_LDST_LOAD_EX:
727                 insn = aarch64_insn_get_load_ex_value();
728                 break;
729         case AARCH64_INSN_LDST_STORE_EX:
730                 insn = aarch64_insn_get_store_ex_value();
731                 break;
732         default:
733                 pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
734                 return AARCH64_BREAK_FAULT;
735         }
736
737         insn = aarch64_insn_encode_ldst_size(size, insn);
738
739         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
740                                             reg);
741
742         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
743                                             base);
744
745         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
746                                             AARCH64_INSN_REG_ZR);
747
748         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
749                                             state);
750 }
751
752 u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
753                            enum aarch64_insn_register address,
754                            enum aarch64_insn_register value,
755                            enum aarch64_insn_size_type size)
756 {
757         u32 insn = aarch64_insn_get_ldadd_value();
758
759         switch (size) {
760         case AARCH64_INSN_SIZE_32:
761         case AARCH64_INSN_SIZE_64:
762                 break;
763         default:
764                 pr_err("%s: unimplemented size encoding %d\n", __func__, size);
765                 return AARCH64_BREAK_FAULT;
766         }
767
768         insn = aarch64_insn_encode_ldst_size(size, insn);
769
770         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
771                                             result);
772
773         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
774                                             address);
775
776         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
777                                             value);
778 }
779
780 u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
781                            enum aarch64_insn_register value,
782                            enum aarch64_insn_size_type size)
783 {
784         /*
785          * STADD is simply encoded as an alias for LDADD with XZR as
786          * the destination register.
787          */
788         return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
789                                       value, size);
790 }
791
792 static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
793                                         enum aarch64_insn_prfm_target target,
794                                         enum aarch64_insn_prfm_policy policy,
795                                         u32 insn)
796 {
797         u32 imm_type = 0, imm_target = 0, imm_policy = 0;
798
799         switch (type) {
800         case AARCH64_INSN_PRFM_TYPE_PLD:
801                 break;
802         case AARCH64_INSN_PRFM_TYPE_PLI:
803                 imm_type = BIT(0);
804                 break;
805         case AARCH64_INSN_PRFM_TYPE_PST:
806                 imm_type = BIT(1);
807                 break;
808         default:
809                 pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
810                 return AARCH64_BREAK_FAULT;
811         }
812
813         switch (target) {
814         case AARCH64_INSN_PRFM_TARGET_L1:
815                 break;
816         case AARCH64_INSN_PRFM_TARGET_L2:
817                 imm_target = BIT(0);
818                 break;
819         case AARCH64_INSN_PRFM_TARGET_L3:
820                 imm_target = BIT(1);
821                 break;
822         default:
823                 pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
824                 return AARCH64_BREAK_FAULT;
825         }
826
827         switch (policy) {
828         case AARCH64_INSN_PRFM_POLICY_KEEP:
829                 break;
830         case AARCH64_INSN_PRFM_POLICY_STRM:
831                 imm_policy = BIT(0);
832                 break;
833         default:
834                 pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
835                 return AARCH64_BREAK_FAULT;
836         }
837
838         /* In this case, imm5 is encoded into Rt field. */
839         insn &= ~GENMASK(4, 0);
840         insn |= imm_policy | (imm_target << 1) | (imm_type << 3);
841
842         return insn;
843 }
844
845 u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
846                               enum aarch64_insn_prfm_type type,
847                               enum aarch64_insn_prfm_target target,
848                               enum aarch64_insn_prfm_policy policy)
849 {
850         u32 insn = aarch64_insn_get_prfm_value();
851
852         insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
853
854         insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
855
856         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
857                                             base);
858
859         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
860 }
861
862 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
863                                  enum aarch64_insn_register src,
864                                  int imm, enum aarch64_insn_variant variant,
865                                  enum aarch64_insn_adsb_type type)
866 {
867         u32 insn;
868
869         switch (type) {
870         case AARCH64_INSN_ADSB_ADD:
871                 insn = aarch64_insn_get_add_imm_value();
872                 break;
873         case AARCH64_INSN_ADSB_SUB:
874                 insn = aarch64_insn_get_sub_imm_value();
875                 break;
876         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
877                 insn = aarch64_insn_get_adds_imm_value();
878                 break;
879         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
880                 insn = aarch64_insn_get_subs_imm_value();
881                 break;
882         default:
883                 pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
884                 return AARCH64_BREAK_FAULT;
885         }
886
887         switch (variant) {
888         case AARCH64_INSN_VARIANT_32BIT:
889                 break;
890         case AARCH64_INSN_VARIANT_64BIT:
891                 insn |= AARCH64_INSN_SF_BIT;
892                 break;
893         default:
894                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
895                 return AARCH64_BREAK_FAULT;
896         }
897
898         /* We can't encode more than a 24bit value (12bit + 12bit shift) */
899         if (imm & ~(BIT(24) - 1))
900                 goto out;
901
902         /* If we have something in the top 12 bits... */
903         if (imm & ~(SZ_4K - 1)) {
904                 /* ... and in the low 12 bits -> error */
905                 if (imm & (SZ_4K - 1))
906                         goto out;
907
908                 imm >>= 12;
909                 insn |= AARCH64_INSN_LSL_12;
910         }
911
912         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
913
914         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
915
916         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
917
918 out:
919         pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
920         return AARCH64_BREAK_FAULT;
921 }
922
923 u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
924                               enum aarch64_insn_register src,
925                               int immr, int imms,
926                               enum aarch64_insn_variant variant,
927                               enum aarch64_insn_bitfield_type type)
928 {
929         u32 insn;
930         u32 mask;
931
932         switch (type) {
933         case AARCH64_INSN_BITFIELD_MOVE:
934                 insn = aarch64_insn_get_bfm_value();
935                 break;
936         case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
937                 insn = aarch64_insn_get_ubfm_value();
938                 break;
939         case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
940                 insn = aarch64_insn_get_sbfm_value();
941                 break;
942         default:
943                 pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
944                 return AARCH64_BREAK_FAULT;
945         }
946
947         switch (variant) {
948         case AARCH64_INSN_VARIANT_32BIT:
949                 mask = GENMASK(4, 0);
950                 break;
951         case AARCH64_INSN_VARIANT_64BIT:
952                 insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
953                 mask = GENMASK(5, 0);
954                 break;
955         default:
956                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
957                 return AARCH64_BREAK_FAULT;
958         }
959
960         if (immr & ~mask) {
961                 pr_err("%s: invalid immr encoding %d\n", __func__, immr);
962                 return AARCH64_BREAK_FAULT;
963         }
964         if (imms & ~mask) {
965                 pr_err("%s: invalid imms encoding %d\n", __func__, imms);
966                 return AARCH64_BREAK_FAULT;
967         }
968
969         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
970
971         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
972
973         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
974
975         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
976 }
977
978 u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
979                               int imm, int shift,
980                               enum aarch64_insn_variant variant,
981                               enum aarch64_insn_movewide_type type)
982 {
983         u32 insn;
984
985         switch (type) {
986         case AARCH64_INSN_MOVEWIDE_ZERO:
987                 insn = aarch64_insn_get_movz_value();
988                 break;
989         case AARCH64_INSN_MOVEWIDE_KEEP:
990                 insn = aarch64_insn_get_movk_value();
991                 break;
992         case AARCH64_INSN_MOVEWIDE_INVERSE:
993                 insn = aarch64_insn_get_movn_value();
994                 break;
995         default:
996                 pr_err("%s: unknown movewide encoding %d\n", __func__, type);
997                 return AARCH64_BREAK_FAULT;
998         }
999
1000         if (imm & ~(SZ_64K - 1)) {
1001                 pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
1002                 return AARCH64_BREAK_FAULT;
1003         }
1004
1005         switch (variant) {
1006         case AARCH64_INSN_VARIANT_32BIT:
1007                 if (shift != 0 && shift != 16) {
1008                         pr_err("%s: invalid shift encoding %d\n", __func__,
1009                                shift);
1010                         return AARCH64_BREAK_FAULT;
1011                 }
1012                 break;
1013         case AARCH64_INSN_VARIANT_64BIT:
1014                 insn |= AARCH64_INSN_SF_BIT;
1015                 if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
1016                         pr_err("%s: invalid shift encoding %d\n", __func__,
1017                                shift);
1018                         return AARCH64_BREAK_FAULT;
1019                 }
1020                 break;
1021         default:
1022                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1023                 return AARCH64_BREAK_FAULT;
1024         }
1025
1026         insn |= (shift >> 4) << 21;
1027
1028         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1029
1030         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
1031 }
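
/*
 * Illustrative use (not part of this file): a 64-bit constant can be
 * materialised 16 bits at a time with one MOVZ followed by up to three MOVKs:
 *
 *	insn = aarch64_insn_gen_movewide(reg, val & 0xffff, 0,
 *					 AARCH64_INSN_VARIANT_64BIT,
 *					 AARCH64_INSN_MOVEWIDE_ZERO);
 *	insn = aarch64_insn_gen_movewide(reg, (val >> 16) & 0xffff, 16,
 *					 AARCH64_INSN_VARIANT_64BIT,
 *					 AARCH64_INSN_MOVEWIDE_KEEP);
 *	... and likewise for shifts 32 and 48.
 */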
1032
1033 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
1034                                          enum aarch64_insn_register src,
1035                                          enum aarch64_insn_register reg,
1036                                          int shift,
1037                                          enum aarch64_insn_variant variant,
1038                                          enum aarch64_insn_adsb_type type)
1039 {
1040         u32 insn;
1041
1042         switch (type) {
1043         case AARCH64_INSN_ADSB_ADD:
1044                 insn = aarch64_insn_get_add_value();
1045                 break;
1046         case AARCH64_INSN_ADSB_SUB:
1047                 insn = aarch64_insn_get_sub_value();
1048                 break;
1049         case AARCH64_INSN_ADSB_ADD_SETFLAGS:
1050                 insn = aarch64_insn_get_adds_value();
1051                 break;
1052         case AARCH64_INSN_ADSB_SUB_SETFLAGS:
1053                 insn = aarch64_insn_get_subs_value();
1054                 break;
1055         default:
1056                 pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
1057                 return AARCH64_BREAK_FAULT;
1058         }
1059
1060         switch (variant) {
1061         case AARCH64_INSN_VARIANT_32BIT:
1062                 if (shift & ~(SZ_32 - 1)) {
1063                         pr_err("%s: invalid shift encoding %d\n", __func__,
1064                                shift);
1065                         return AARCH64_BREAK_FAULT;
1066                 }
1067                 break;
1068         case AARCH64_INSN_VARIANT_64BIT:
1069                 insn |= AARCH64_INSN_SF_BIT;
1070                 if (shift & ~(SZ_64 - 1)) {
1071                         pr_err("%s: invalid shift encoding %d\n", __func__,
1072                                shift);
1073                         return AARCH64_BREAK_FAULT;
1074                 }
1075                 break;
1076         default:
1077                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1078                 return AARCH64_BREAK_FAULT;
1079         }
1080
1081
1082         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1083
1084         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1085
1086         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1087
1088         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1089 }
1090
1091 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
1092                            enum aarch64_insn_register src,
1093                            enum aarch64_insn_variant variant,
1094                            enum aarch64_insn_data1_type type)
1095 {
1096         u32 insn;
1097
1098         switch (type) {
1099         case AARCH64_INSN_DATA1_REVERSE_16:
1100                 insn = aarch64_insn_get_rev16_value();
1101                 break;
1102         case AARCH64_INSN_DATA1_REVERSE_32:
1103                 insn = aarch64_insn_get_rev32_value();
1104                 break;
1105         case AARCH64_INSN_DATA1_REVERSE_64:
1106                 if (variant != AARCH64_INSN_VARIANT_64BIT) {
1107                         pr_err("%s: invalid variant for reverse64 %d\n",
1108                                __func__, variant);
1109                         return AARCH64_BREAK_FAULT;
1110                 }
1111                 insn = aarch64_insn_get_rev64_value();
1112                 break;
1113         default:
1114                 pr_err("%s: unknown data1 encoding %d\n", __func__, type);
1115                 return AARCH64_BREAK_FAULT;
1116         }
1117
1118         switch (variant) {
1119         case AARCH64_INSN_VARIANT_32BIT:
1120                 break;
1121         case AARCH64_INSN_VARIANT_64BIT:
1122                 insn |= AARCH64_INSN_SF_BIT;
1123                 break;
1124         default:
1125                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1126                 return AARCH64_BREAK_FAULT;
1127         }
1128
1129         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1130
1131         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1132 }
1133
1134 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
1135                            enum aarch64_insn_register src,
1136                            enum aarch64_insn_register reg,
1137                            enum aarch64_insn_variant variant,
1138                            enum aarch64_insn_data2_type type)
1139 {
1140         u32 insn;
1141
1142         switch (type) {
1143         case AARCH64_INSN_DATA2_UDIV:
1144                 insn = aarch64_insn_get_udiv_value();
1145                 break;
1146         case AARCH64_INSN_DATA2_SDIV:
1147                 insn = aarch64_insn_get_sdiv_value();
1148                 break;
1149         case AARCH64_INSN_DATA2_LSLV:
1150                 insn = aarch64_insn_get_lslv_value();
1151                 break;
1152         case AARCH64_INSN_DATA2_LSRV:
1153                 insn = aarch64_insn_get_lsrv_value();
1154                 break;
1155         case AARCH64_INSN_DATA2_ASRV:
1156                 insn = aarch64_insn_get_asrv_value();
1157                 break;
1158         case AARCH64_INSN_DATA2_RORV:
1159                 insn = aarch64_insn_get_rorv_value();
1160                 break;
1161         default:
1162                 pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1163                 return AARCH64_BREAK_FAULT;
1164         }
1165
1166         switch (variant) {
1167         case AARCH64_INSN_VARIANT_32BIT:
1168                 break;
1169         case AARCH64_INSN_VARIANT_64BIT:
1170                 insn |= AARCH64_INSN_SF_BIT;
1171                 break;
1172         default:
1173                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1174                 return AARCH64_BREAK_FAULT;
1175         }
1176
1177         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1178
1179         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1180
1181         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1182 }
1183
1184 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1185                            enum aarch64_insn_register src,
1186                            enum aarch64_insn_register reg1,
1187                            enum aarch64_insn_register reg2,
1188                            enum aarch64_insn_variant variant,
1189                            enum aarch64_insn_data3_type type)
1190 {
1191         u32 insn;
1192
1193         switch (type) {
1194         case AARCH64_INSN_DATA3_MADD:
1195                 insn = aarch64_insn_get_madd_value();
1196                 break;
1197         case AARCH64_INSN_DATA3_MSUB:
1198                 insn = aarch64_insn_get_msub_value();
1199                 break;
1200         default:
1201                 pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1202                 return AARCH64_BREAK_FAULT;
1203         }
1204
1205         switch (variant) {
1206         case AARCH64_INSN_VARIANT_32BIT:
1207                 break;
1208         case AARCH64_INSN_VARIANT_64BIT:
1209                 insn |= AARCH64_INSN_SF_BIT;
1210                 break;
1211         default:
1212                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1213                 return AARCH64_BREAK_FAULT;
1214         }
1215
1216         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1217
1218         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1219
1220         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1221                                             reg1);
1222
1223         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1224                                             reg2);
1225 }
1226
1227 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1228                                          enum aarch64_insn_register src,
1229                                          enum aarch64_insn_register reg,
1230                                          int shift,
1231                                          enum aarch64_insn_variant variant,
1232                                          enum aarch64_insn_logic_type type)
1233 {
1234         u32 insn;
1235
1236         switch (type) {
1237         case AARCH64_INSN_LOGIC_AND:
1238                 insn = aarch64_insn_get_and_value();
1239                 break;
1240         case AARCH64_INSN_LOGIC_BIC:
1241                 insn = aarch64_insn_get_bic_value();
1242                 break;
1243         case AARCH64_INSN_LOGIC_ORR:
1244                 insn = aarch64_insn_get_orr_value();
1245                 break;
1246         case AARCH64_INSN_LOGIC_ORN:
1247                 insn = aarch64_insn_get_orn_value();
1248                 break;
1249         case AARCH64_INSN_LOGIC_EOR:
1250                 insn = aarch64_insn_get_eor_value();
1251                 break;
1252         case AARCH64_INSN_LOGIC_EON:
1253                 insn = aarch64_insn_get_eon_value();
1254                 break;
1255         case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1256                 insn = aarch64_insn_get_ands_value();
1257                 break;
1258         case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1259                 insn = aarch64_insn_get_bics_value();
1260                 break;
1261         default:
1262                 pr_err("%s: unknown logical encoding %d\n", __func__, type);
1263                 return AARCH64_BREAK_FAULT;
1264         }
1265
1266         switch (variant) {
1267         case AARCH64_INSN_VARIANT_32BIT:
1268                 if (shift & ~(SZ_32 - 1)) {
1269                         pr_err("%s: invalid shift encoding %d\n", __func__,
1270                                shift);
1271                         return AARCH64_BREAK_FAULT;
1272                 }
1273                 break;
1274         case AARCH64_INSN_VARIANT_64BIT:
1275                 insn |= AARCH64_INSN_SF_BIT;
1276                 if (shift & ~(SZ_64 - 1)) {
1277                         pr_err("%s: invalid shift encoding %d\n", __func__,
1278                                shift);
1279                         return AARCH64_BREAK_FAULT;
1280                 }
1281                 break;
1282         default:
1283                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1284                 return AARCH64_BREAK_FAULT;
1285         }
1286
1287
1288         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1289
1290         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1291
1292         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1293
1294         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1295 }
1296
1297 /*
1298  * MOV (register) is architecturally an alias of ORR (shifted register) where
1299  * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
1300  */
1301 u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
1302                               enum aarch64_insn_register src,
1303                               enum aarch64_insn_variant variant)
1304 {
1305         return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
1306                                                     src, 0, variant,
1307                                                     AARCH64_INSN_LOGIC_ORR);
1308 }
1309
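/*
 * ADR encodes a byte offset from the instruction itself, while ADRP encodes a
 * 4K-page offset from the page containing the instruction (hence the
 * ALIGN_DOWN and the shift by 12 below). In both cases the encoded immediate
 * must fit in the signed 21-bit ADR field, checked against +/-1M here.
 */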
1310 u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
1311                          enum aarch64_insn_register reg,
1312                          enum aarch64_insn_adr_type type)
1313 {
1314         u32 insn;
1315         s32 offset;
1316
1317         switch (type) {
1318         case AARCH64_INSN_ADR_TYPE_ADR:
1319                 insn = aarch64_insn_get_adr_value();
1320                 offset = addr - pc;
1321                 break;
1322         case AARCH64_INSN_ADR_TYPE_ADRP:
1323                 insn = aarch64_insn_get_adrp_value();
1324                 offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
1325                 break;
1326         default:
1327                 pr_err("%s: unknown adr encoding %d\n", __func__, type);
1328                 return AARCH64_BREAK_FAULT;
1329         }
1330
1331         if (offset < -SZ_1M || offset >= SZ_1M)
1332                 return AARCH64_BREAK_FAULT;
1333
1334         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
1335
1336         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
1337 }
1338
1339 /*
1340  * Decode the imm field of a branch, and return the byte offset as a
1341  * signed value (so it can be used when computing a new branch
1342  * target).
1343  */
1344 s32 aarch64_get_branch_offset(u32 insn)
1345 {
1346         s32 imm;
1347
1348         if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
1349                 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
1350                 return (imm << 6) >> 4;
1351         }
1352
1353         if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1354             aarch64_insn_is_bcond(insn)) {
1355                 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
1356                 return (imm << 13) >> 11;
1357         }
1358
1359         if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
1360                 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
1361                 return (imm << 18) >> 16;
1362         }
1363
1364         /* Unhandled instruction */
1365         BUG();
1366 }
1367
1368 /*
1369  * Encode the displacement of a branch in the imm field and return the
1370  * updated instruction.
1371  */
1372 u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1373 {
1374         if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1375                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1376                                                      offset >> 2);
1377
1378         if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1379             aarch64_insn_is_bcond(insn))
1380                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1381                                                      offset >> 2);
1382
1383         if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1384                 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1385                                                      offset >> 2);
1386
1387         /* Unhandled instruction */
1388         BUG();
1389 }
1390
1391 s32 aarch64_insn_adrp_get_offset(u32 insn)
1392 {
1393         BUG_ON(!aarch64_insn_is_adrp(insn));
1394         return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
1395 }
1396
1397 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
1398 {
1399         BUG_ON(!aarch64_insn_is_adrp(insn));
1400         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
1401                                                 offset >> 12);
1402 }
1403
1404 /*
1405  * Extract the Op/CR data from a msr/mrs instruction.
1406  */
1407 u32 aarch64_insn_extract_system_reg(u32 insn)
1408 {
1409         return (insn & 0x1FFFE0) >> 5;
1410 }
1411
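/*
 * A 32-bit (wide) Thumb-2 instruction is identified by its first halfword
 * being 0xe800 or above; anything below that is a 16-bit instruction.
 */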
1412 bool aarch32_insn_is_wide(u32 insn)
1413 {
1414         return insn >= 0xe800;
1415 }
1416
1417 /*
1418  * Extract a register number from an AArch32 instruction at a given bit offset.
1419  */
1420 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1421 {
1422         return (insn & (0xf << offset)) >> offset;
1423 }
1424
1425 #define OPC2_MASK       0x7
1426 #define OPC2_OFFSET     5
1427 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1428 {
1429         return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1430 }
1431
1432 #define CRM_MASK        0xf
1433 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1434 {
1435         return insn & CRM_MASK;
1436 }
1437
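/*
 * Condition-code evaluators: each takes a saved PSTATE value and returns
 * whether the corresponding AArch32/AArch64 condition (EQ, NE, CS, ...)
 * holds. They are dispatched through aarch32_opcode_cond_checks[] below.
 */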
1438 static bool __kprobes __check_eq(unsigned long pstate)
1439 {
1440         return (pstate & PSR_Z_BIT) != 0;
1441 }
1442
1443 static bool __kprobes __check_ne(unsigned long pstate)
1444 {
1445         return (pstate & PSR_Z_BIT) == 0;
1446 }
1447
1448 static bool __kprobes __check_cs(unsigned long pstate)
1449 {
1450         return (pstate & PSR_C_BIT) != 0;
1451 }
1452
1453 static bool __kprobes __check_cc(unsigned long pstate)
1454 {
1455         return (pstate & PSR_C_BIT) == 0;
1456 }
1457
1458 static bool __kprobes __check_mi(unsigned long pstate)
1459 {
1460         return (pstate & PSR_N_BIT) != 0;
1461 }
1462
1463 static bool __kprobes __check_pl(unsigned long pstate)
1464 {
1465         return (pstate & PSR_N_BIT) == 0;
1466 }
1467
1468 static bool __kprobes __check_vs(unsigned long pstate)
1469 {
1470         return (pstate & PSR_V_BIT) != 0;
1471 }
1472
1473 static bool __kprobes __check_vc(unsigned long pstate)
1474 {
1475         return (pstate & PSR_V_BIT) == 0;
1476 }
1477
1478 static bool __kprobes __check_hi(unsigned long pstate)
1479 {
1480         pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
1481         return (pstate & PSR_C_BIT) != 0;
1482 }
1483
1484 static bool __kprobes __check_ls(unsigned long pstate)
1485 {
1486         pstate &= ~(pstate >> 1);       /* PSR_C_BIT &= ~PSR_Z_BIT */
1487         return (pstate & PSR_C_BIT) == 0;
1488 }
1489
1490 static bool __kprobes __check_ge(unsigned long pstate)
1491 {
1492         pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
1493         return (pstate & PSR_N_BIT) == 0;
1494 }
1495
1496 static bool __kprobes __check_lt(unsigned long pstate)
1497 {
1498         pstate ^= (pstate << 3);        /* PSR_N_BIT ^= PSR_V_BIT */
1499         return (pstate & PSR_N_BIT) != 0;
1500 }
1501
1502 static bool __kprobes __check_gt(unsigned long pstate)
1503 {
1504         /*PSR_N_BIT ^= PSR_V_BIT */
1505         unsigned long temp = pstate ^ (pstate << 3);
1506
1507         temp |= (pstate << 1);  /*PSR_N_BIT |= PSR_Z_BIT */
1508         return (temp & PSR_N_BIT) == 0;
1509 }
1510
1511 static bool __kprobes __check_le(unsigned long pstate)
1512 {
1513         /*PSR_N_BIT ^= PSR_V_BIT */
1514         unsigned long temp = pstate ^ (pstate << 3);
1515
1516         temp |= (pstate << 1);  /*PSR_N_BIT |= PSR_Z_BIT */
1517         return (temp & PSR_N_BIT) != 0;
1518 }
1519
1520 static bool __kprobes __check_al(unsigned long pstate)
1521 {
1522         return true;
1523 }
1524
1525 /*
1526  * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
1527  * it behaves identically to 0b1110 ("al").
1528  */
1529 pstate_check_t * const aarch32_opcode_cond_checks[16] = {
1530         __check_eq, __check_ne, __check_cs, __check_cc,
1531         __check_mi, __check_pl, __check_vs, __check_vc,
1532         __check_hi, __check_ls, __check_ge, __check_lt,
1533         __check_gt, __check_le, __check_al, __check_al
1534 };
1535
1536 static bool range_of_ones(u64 val)
1537 {
1538         /* Doesn't handle full ones or full zeroes */
1539         u64 sval = val >> __ffs64(val);
1540
1541         /* One of Sean Eron Anderson's bithack tricks */
1542         return ((sval + 1) & (sval)) == 0;
1543 }
1544
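/*
 * Encode a logical (bitmask) immediate: such an immediate is a repeating
 * element of 2, 4, 8, 16, 32 or 64 bits, where each element is a rotated,
 * contiguous run of ones. The encoding is the (N, immr, imms) triple: N and
 * imms describe the element size and the number of ones, immr the rotation.
 * This routine finds the smallest repeating element, counts its ones and
 * derives the rotation, or returns AARCH64_BREAK_FAULT if the value cannot
 * be represented this way.
 */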
1545 static u32 aarch64_encode_immediate(u64 imm,
1546                                     enum aarch64_insn_variant variant,
1547                                     u32 insn)
1548 {
1549         unsigned int immr, imms, n, ones, ror, esz, tmp;
1550         u64 mask;
1551
1552         switch (variant) {
1553         case AARCH64_INSN_VARIANT_32BIT:
1554                 esz = 32;
1555                 break;
1556         case AARCH64_INSN_VARIANT_64BIT:
1557                 insn |= AARCH64_INSN_SF_BIT;
1558                 esz = 64;
1559                 break;
1560         default:
1561                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1562                 return AARCH64_BREAK_FAULT;
1563         }
1564
1565         mask = GENMASK(esz - 1, 0);
1566
1567         /* Can't encode full zeroes, full ones, or value wider than the mask */
1568         if (!imm || imm == mask || imm & ~mask)
1569                 return AARCH64_BREAK_FAULT;
1570
1571         /*
1572          * Inverse of Replicate(). Try to spot a repeating pattern
1573          * with a pow2 stride.
1574          */
1575         for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
1576                 u64 emask = BIT(tmp) - 1;
1577
1578                 if ((imm & emask) != ((imm >> tmp) & emask))
1579                         break;
1580
1581                 esz = tmp;
1582                 mask = emask;
1583         }
1584
1585         /* N is only set if we're encoding a 64bit value */
1586         n = esz == 64;
1587
1588         /* Trim imm to the element size */
1589         imm &= mask;
1590
1591         /* That's how many ones we need to encode */
1592         ones = hweight64(imm);
1593
1594         /*
1595          * imms is set to (ones - 1), prefixed with a string of ones
1596          * and a zero if they fit. Cap it to 6 bits.
1597          */
1598         imms  = ones - 1;
1599         imms |= 0xf << ffs(esz);
1600         imms &= BIT(6) - 1;
1601
1602         /* Compute the rotation */
1603         if (range_of_ones(imm)) {
1604                 /*
1605                  * Pattern: 0..01..10..0
1606                  *
1607                  * Compute how much rotation we need to align it to the right
1608                  */
1609                 ror = __ffs64(imm);
1610         } else {
1611                 /*
1612                  * Pattern: 0..01..10..01..1
1613                  *
1614                  * Fill the unused top bits with ones, and check if
1615                  * the result is a valid immediate (all ones with a
1616                  * contiguous range of zeroes).
1617                  */
1618                 imm |= ~mask;
1619                 if (!range_of_ones(~imm))
1620                         return AARCH64_BREAK_FAULT;
1621
1622                 /*
1623                  * Compute the rotation to get a continuous set of
1624                  * ones, with the first bit set at position 0
1625                  */
1626                 ror = fls(~imm);
1627         }
1628
1629         /*
1630          * immr is the number of bits we need to rotate back to the
1631          * original set of ones. Note that this is relative to the
1632          * element size...
1633          */
1634         immr = (esz - ror) % esz;
1635
1636         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
1637         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
1638         return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
1639 }
1640
1641 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1642                                        enum aarch64_insn_variant variant,
1643                                        enum aarch64_insn_register Rn,
1644                                        enum aarch64_insn_register Rd,
1645                                        u64 imm)
1646 {
1647         u32 insn;
1648
1649         switch (type) {
1650         case AARCH64_INSN_LOGIC_AND:
1651                 insn = aarch64_insn_get_and_imm_value();
1652                 break;
1653         case AARCH64_INSN_LOGIC_ORR:
1654                 insn = aarch64_insn_get_orr_imm_value();
1655                 break;
1656         case AARCH64_INSN_LOGIC_EOR:
1657                 insn = aarch64_insn_get_eor_imm_value();
1658                 break;
1659         case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1660                 insn = aarch64_insn_get_ands_imm_value();
1661                 break;
1662         default:
1663                 pr_err("%s: unknown logical encoding %d\n", __func__, type);
1664                 return AARCH64_BREAK_FAULT;
1665         }
1666
1667         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1668         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1669         return aarch64_encode_immediate(imm, variant, insn);
1670 }
1671
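/*
 * Note: ROR (immediate) is an alias of EXTR with Rn == Rm, so this helper can
 * also be used to generate rotate-right-by-constant instructions.
 */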
1672 u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1673                           enum aarch64_insn_register Rm,
1674                           enum aarch64_insn_register Rn,
1675                           enum aarch64_insn_register Rd,
1676                           u8 lsb)
1677 {
1678         u32 insn;
1679
1680         insn = aarch64_insn_get_extr_value();
1681
1682         switch (variant) {
1683         case AARCH64_INSN_VARIANT_32BIT:
1684                 if (lsb > 31)
1685                         return AARCH64_BREAK_FAULT;
1686                 break;
1687         case AARCH64_INSN_VARIANT_64BIT:
1688                 if (lsb > 63)
1689                         return AARCH64_BREAK_FAULT;
1690                 insn |= AARCH64_INSN_SF_BIT;
1691                 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1692                 break;
1693         default:
1694                 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1695                 return AARCH64_BREAK_FAULT;
1696         }
1697
1698         insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1699         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1700         insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1701         return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1702 }