// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/sections.h>
26 #define AARCH64_INSN_SF_BIT BIT(31)
27 #define AARCH64_INSN_N_BIT BIT(22)
28 #define AARCH64_INSN_LSL_12 BIT(22)
30 static const int aarch64_insn_encoding_class[] = {
31 AARCH64_INSN_CLS_UNKNOWN,
32 AARCH64_INSN_CLS_UNKNOWN,
33 AARCH64_INSN_CLS_UNKNOWN,
34 AARCH64_INSN_CLS_UNKNOWN,
35 AARCH64_INSN_CLS_LDST,
36 AARCH64_INSN_CLS_DP_REG,
37 AARCH64_INSN_CLS_LDST,
38 AARCH64_INSN_CLS_DP_FPSIMD,
39 AARCH64_INSN_CLS_DP_IMM,
40 AARCH64_INSN_CLS_DP_IMM,
41 AARCH64_INSN_CLS_BR_SYS,
42 AARCH64_INSN_CLS_BR_SYS,
43 AARCH64_INSN_CLS_LDST,
44 AARCH64_INSN_CLS_DP_REG,
45 AARCH64_INSN_CLS_LDST,
46 AARCH64_INSN_CLS_DP_FPSIMD,
49 enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
51 return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
54 bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
56 if (!aarch64_insn_is_hint(insn))
59 switch (insn & 0xFE0) {
60 case AARCH64_INSN_HINT_XPACLRI:
61 case AARCH64_INSN_HINT_PACIA_1716:
62 case AARCH64_INSN_HINT_PACIB_1716:
63 case AARCH64_INSN_HINT_AUTIA_1716:
64 case AARCH64_INSN_HINT_AUTIB_1716:
65 case AARCH64_INSN_HINT_PACIAZ:
66 case AARCH64_INSN_HINT_PACIASP:
67 case AARCH64_INSN_HINT_PACIBZ:
68 case AARCH64_INSN_HINT_PACIBSP:
69 case AARCH64_INSN_HINT_AUTIAZ:
70 case AARCH64_INSN_HINT_AUTIASP:
71 case AARCH64_INSN_HINT_AUTIBZ:
72 case AARCH64_INSN_HINT_AUTIBSP:
73 case AARCH64_INSN_HINT_BTI:
74 case AARCH64_INSN_HINT_BTIC:
75 case AARCH64_INSN_HINT_BTIJ:
76 case AARCH64_INSN_HINT_BTIJC:
77 case AARCH64_INSN_HINT_NOP:
84 bool aarch64_insn_is_branch_imm(u32 insn)
86 return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
87 aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
88 aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
89 aarch64_insn_is_bcond(insn));
92 static DEFINE_RAW_SPINLOCK(patch_lock);
94 static bool is_exit_text(unsigned long addr)
96 /* discarded with init text/data */
97 return system_state < SYSTEM_RUNNING &&
98 addr >= (unsigned long)__exittext_begin &&
99 addr < (unsigned long)__exittext_end;
102 static bool is_image_text(unsigned long addr)
104 return core_kernel_text(addr) || is_exit_text(addr);
107 static void __kprobes *patch_map(void *addr, int fixmap)
109 unsigned long uintaddr = (uintptr_t) addr;
110 bool image = is_image_text(uintaddr);
114 page = phys_to_page(__pa_symbol(addr));
115 else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
116 page = vmalloc_to_page(addr);
121 return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
122 (uintaddr & ~PAGE_MASK));
125 static void __kprobes patch_unmap(int fixmap)
127 clear_fixmap(fixmap);
130 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
133 int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
138 ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
140 *insnp = le32_to_cpu(val);
145 static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
148 unsigned long flags = 0;
151 raw_spin_lock_irqsave(&patch_lock, flags);
152 waddr = patch_map(addr, FIX_TEXT_POKE0);
154 ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
156 patch_unmap(FIX_TEXT_POKE0);
157 raw_spin_unlock_irqrestore(&patch_lock, flags);
162 int __kprobes aarch64_insn_write(void *addr, u32 insn)
164 return __aarch64_insn_write(addr, cpu_to_le32(insn));
167 bool __kprobes aarch64_insn_uses_literal(u32 insn)
169 /* ldr/ldrsw (literal), prfm */
171 return aarch64_insn_is_ldr_lit(insn) ||
172 aarch64_insn_is_ldrsw_lit(insn) ||
173 aarch64_insn_is_adr_adrp(insn) ||
174 aarch64_insn_is_prfm_lit(insn);
177 bool __kprobes aarch64_insn_is_branch(u32 insn)
179 /* b, bl, cb*, tb*, b.cond, br, blr */
181 return aarch64_insn_is_b(insn) ||
182 aarch64_insn_is_bl(insn) ||
183 aarch64_insn_is_cbz(insn) ||
184 aarch64_insn_is_cbnz(insn) ||
185 aarch64_insn_is_tbz(insn) ||
186 aarch64_insn_is_tbnz(insn) ||
187 aarch64_insn_is_ret(insn) ||
188 aarch64_insn_is_br(insn) ||
189 aarch64_insn_is_blr(insn) ||
190 aarch64_insn_is_bcond(insn);
193 int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
198 /* A64 instructions must be word aligned */
199 if ((uintptr_t)tp & 0x3)
202 ret = aarch64_insn_write(tp, insn);
204 __flush_icache_range((uintptr_t)tp,
205 (uintptr_t)tp + AARCH64_INSN_SIZE);
210 struct aarch64_insn_patch {
217 static int __kprobes aarch64_insn_patch_text_cb(void *arg)
220 struct aarch64_insn_patch *pp = arg;
222 /* The first CPU becomes master */
223 if (atomic_inc_return(&pp->cpu_count) == 1) {
224 for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
225 ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
227 /* Notify other processors with an additional increment. */
228 atomic_inc(&pp->cpu_count);
230 while (atomic_read(&pp->cpu_count) <= num_online_cpus())
238 int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
240 struct aarch64_insn_patch patch = {
244 .cpu_count = ATOMIC_INIT(0),
250 return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
254 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
255 u32 *maskp, int *shiftp)
261 case AARCH64_INSN_IMM_26:
265 case AARCH64_INSN_IMM_19:
269 case AARCH64_INSN_IMM_16:
273 case AARCH64_INSN_IMM_14:
277 case AARCH64_INSN_IMM_12:
281 case AARCH64_INSN_IMM_9:
285 case AARCH64_INSN_IMM_7:
289 case AARCH64_INSN_IMM_6:
290 case AARCH64_INSN_IMM_S:
294 case AARCH64_INSN_IMM_R:
298 case AARCH64_INSN_IMM_N:
312 #define ADR_IMM_HILOSPLIT 2
313 #define ADR_IMM_SIZE SZ_2M
314 #define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
315 #define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
316 #define ADR_IMM_LOSHIFT 29
317 #define ADR_IMM_HISHIFT 5
319 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
321 u32 immlo, immhi, mask;
325 case AARCH64_INSN_IMM_ADR:
327 immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
328 immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
329 insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
330 mask = ADR_IMM_SIZE - 1;
333 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
334 pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
340 return (insn >> shift) & mask;
343 u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
346 u32 immlo, immhi, mask;
349 if (insn == AARCH64_BREAK_FAULT)
350 return AARCH64_BREAK_FAULT;
353 case AARCH64_INSN_IMM_ADR:
355 immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
356 imm >>= ADR_IMM_HILOSPLIT;
357 immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
359 mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
360 (ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
363 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
364 pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
366 return AARCH64_BREAK_FAULT;
370 /* Update the immediate field. */
371 insn &= ~(mask << shift);
372 insn |= (imm & mask) << shift;
377 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
383 case AARCH64_INSN_REGTYPE_RT:
384 case AARCH64_INSN_REGTYPE_RD:
387 case AARCH64_INSN_REGTYPE_RN:
390 case AARCH64_INSN_REGTYPE_RT2:
391 case AARCH64_INSN_REGTYPE_RA:
394 case AARCH64_INSN_REGTYPE_RM:
398 pr_err("%s: unknown register type encoding %d\n", __func__,
403 return (insn >> shift) & GENMASK(4, 0);
406 static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
408 enum aarch64_insn_register reg)
412 if (insn == AARCH64_BREAK_FAULT)
413 return AARCH64_BREAK_FAULT;
415 if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
416 pr_err("%s: unknown register encoding %d\n", __func__, reg);
417 return AARCH64_BREAK_FAULT;
421 case AARCH64_INSN_REGTYPE_RT:
422 case AARCH64_INSN_REGTYPE_RD:
425 case AARCH64_INSN_REGTYPE_RN:
428 case AARCH64_INSN_REGTYPE_RT2:
429 case AARCH64_INSN_REGTYPE_RA:
432 case AARCH64_INSN_REGTYPE_RM:
433 case AARCH64_INSN_REGTYPE_RS:
437 pr_err("%s: unknown register type encoding %d\n", __func__,
439 return AARCH64_BREAK_FAULT;
442 insn &= ~(GENMASK(4, 0) << shift);
443 insn |= reg << shift;
448 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
454 case AARCH64_INSN_SIZE_8:
457 case AARCH64_INSN_SIZE_16:
460 case AARCH64_INSN_SIZE_32:
463 case AARCH64_INSN_SIZE_64:
467 pr_err("%s: unknown size encoding %d\n", __func__, type);
468 return AARCH64_BREAK_FAULT;
471 insn &= ~GENMASK(31, 30);
/*
 * Compute the byte offset from @pc to @addr for a branch with reach
 * [-range, range). Returns @range itself on error so callers can detect
 * failure with "offset >= range".
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
497 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
498 enum aarch64_insn_branch_type type)
504 * B/BL support [-128M, 128M) offset
505 * ARM64 virtual address arrangement guarantees all kernel and module
506 * texts are within +/-128M.
508 offset = branch_imm_common(pc, addr, SZ_128M);
509 if (offset >= SZ_128M)
510 return AARCH64_BREAK_FAULT;
513 case AARCH64_INSN_BRANCH_LINK:
514 insn = aarch64_insn_get_bl_value();
516 case AARCH64_INSN_BRANCH_NOLINK:
517 insn = aarch64_insn_get_b_value();
520 pr_err("%s: unknown branch encoding %d\n", __func__, type);
521 return AARCH64_BREAK_FAULT;
524 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
528 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
529 enum aarch64_insn_register reg,
530 enum aarch64_insn_variant variant,
531 enum aarch64_insn_branch_type type)
536 offset = branch_imm_common(pc, addr, SZ_1M);
538 return AARCH64_BREAK_FAULT;
541 case AARCH64_INSN_BRANCH_COMP_ZERO:
542 insn = aarch64_insn_get_cbz_value();
544 case AARCH64_INSN_BRANCH_COMP_NONZERO:
545 insn = aarch64_insn_get_cbnz_value();
548 pr_err("%s: unknown branch encoding %d\n", __func__, type);
549 return AARCH64_BREAK_FAULT;
553 case AARCH64_INSN_VARIANT_32BIT:
555 case AARCH64_INSN_VARIANT_64BIT:
556 insn |= AARCH64_INSN_SF_BIT;
559 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
560 return AARCH64_BREAK_FAULT;
563 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
565 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
569 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
570 enum aarch64_insn_condition cond)
575 offset = branch_imm_common(pc, addr, SZ_1M);
577 insn = aarch64_insn_get_bcond_value();
579 if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
580 pr_err("%s: unknown condition encoding %d\n", __func__, cond);
581 return AARCH64_BREAK_FAULT;
585 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
589 u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
591 return aarch64_insn_get_hint_value() | op;
594 u32 __kprobes aarch64_insn_gen_nop(void)
596 return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
599 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
600 enum aarch64_insn_branch_type type)
605 case AARCH64_INSN_BRANCH_NOLINK:
606 insn = aarch64_insn_get_br_value();
608 case AARCH64_INSN_BRANCH_LINK:
609 insn = aarch64_insn_get_blr_value();
611 case AARCH64_INSN_BRANCH_RETURN:
612 insn = aarch64_insn_get_ret_value();
615 pr_err("%s: unknown branch encoding %d\n", __func__, type);
616 return AARCH64_BREAK_FAULT;
619 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
622 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
623 enum aarch64_insn_register base,
624 enum aarch64_insn_register offset,
625 enum aarch64_insn_size_type size,
626 enum aarch64_insn_ldst_type type)
631 case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
632 insn = aarch64_insn_get_ldr_reg_value();
634 case AARCH64_INSN_LDST_STORE_REG_OFFSET:
635 insn = aarch64_insn_get_str_reg_value();
638 pr_err("%s: unknown load/store encoding %d\n", __func__, type);
639 return AARCH64_BREAK_FAULT;
642 insn = aarch64_insn_encode_ldst_size(size, insn);
644 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
646 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
649 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
653 u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
654 enum aarch64_insn_register reg2,
655 enum aarch64_insn_register base,
657 enum aarch64_insn_variant variant,
658 enum aarch64_insn_ldst_type type)
664 case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
665 insn = aarch64_insn_get_ldp_pre_value();
667 case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
668 insn = aarch64_insn_get_stp_pre_value();
670 case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
671 insn = aarch64_insn_get_ldp_post_value();
673 case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
674 insn = aarch64_insn_get_stp_post_value();
677 pr_err("%s: unknown load/store encoding %d\n", __func__, type);
678 return AARCH64_BREAK_FAULT;
682 case AARCH64_INSN_VARIANT_32BIT:
683 if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
684 pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
686 return AARCH64_BREAK_FAULT;
690 case AARCH64_INSN_VARIANT_64BIT:
691 if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
692 pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
694 return AARCH64_BREAK_FAULT;
697 insn |= AARCH64_INSN_SF_BIT;
700 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
701 return AARCH64_BREAK_FAULT;
704 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
707 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
710 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
713 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
717 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
718 enum aarch64_insn_register base,
719 enum aarch64_insn_register state,
720 enum aarch64_insn_size_type size,
721 enum aarch64_insn_ldst_type type)
726 case AARCH64_INSN_LDST_LOAD_EX:
727 insn = aarch64_insn_get_load_ex_value();
729 case AARCH64_INSN_LDST_STORE_EX:
730 insn = aarch64_insn_get_store_ex_value();
733 pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
734 return AARCH64_BREAK_FAULT;
737 insn = aarch64_insn_encode_ldst_size(size, insn);
739 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
742 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
745 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
746 AARCH64_INSN_REG_ZR);
748 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
752 u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
753 enum aarch64_insn_register address,
754 enum aarch64_insn_register value,
755 enum aarch64_insn_size_type size)
757 u32 insn = aarch64_insn_get_ldadd_value();
760 case AARCH64_INSN_SIZE_32:
761 case AARCH64_INSN_SIZE_64:
764 pr_err("%s: unimplemented size encoding %d\n", __func__, size);
765 return AARCH64_BREAK_FAULT;
768 insn = aarch64_insn_encode_ldst_size(size, insn);
770 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
773 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
776 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
780 u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
781 enum aarch64_insn_register value,
782 enum aarch64_insn_size_type size)
785 * STADD is simply encoded as an alias for LDADD with XZR as
786 * the destination register.
788 return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
792 static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
793 enum aarch64_insn_prfm_target target,
794 enum aarch64_insn_prfm_policy policy,
797 u32 imm_type = 0, imm_target = 0, imm_policy = 0;
800 case AARCH64_INSN_PRFM_TYPE_PLD:
802 case AARCH64_INSN_PRFM_TYPE_PLI:
805 case AARCH64_INSN_PRFM_TYPE_PST:
809 pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
810 return AARCH64_BREAK_FAULT;
814 case AARCH64_INSN_PRFM_TARGET_L1:
816 case AARCH64_INSN_PRFM_TARGET_L2:
819 case AARCH64_INSN_PRFM_TARGET_L3:
823 pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
824 return AARCH64_BREAK_FAULT;
828 case AARCH64_INSN_PRFM_POLICY_KEEP:
830 case AARCH64_INSN_PRFM_POLICY_STRM:
834 pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
835 return AARCH64_BREAK_FAULT;
838 /* In this case, imm5 is encoded into Rt field. */
839 insn &= ~GENMASK(4, 0);
840 insn |= imm_policy | (imm_target << 1) | (imm_type << 3);
845 u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
846 enum aarch64_insn_prfm_type type,
847 enum aarch64_insn_prfm_target target,
848 enum aarch64_insn_prfm_policy policy)
850 u32 insn = aarch64_insn_get_prfm_value();
852 insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);
854 insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);
856 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
859 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
862 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
863 enum aarch64_insn_register src,
864 int imm, enum aarch64_insn_variant variant,
865 enum aarch64_insn_adsb_type type)
870 case AARCH64_INSN_ADSB_ADD:
871 insn = aarch64_insn_get_add_imm_value();
873 case AARCH64_INSN_ADSB_SUB:
874 insn = aarch64_insn_get_sub_imm_value();
876 case AARCH64_INSN_ADSB_ADD_SETFLAGS:
877 insn = aarch64_insn_get_adds_imm_value();
879 case AARCH64_INSN_ADSB_SUB_SETFLAGS:
880 insn = aarch64_insn_get_subs_imm_value();
883 pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
884 return AARCH64_BREAK_FAULT;
888 case AARCH64_INSN_VARIANT_32BIT:
890 case AARCH64_INSN_VARIANT_64BIT:
891 insn |= AARCH64_INSN_SF_BIT;
894 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
895 return AARCH64_BREAK_FAULT;
898 /* We can't encode more than a 24bit value (12bit + 12bit shift) */
899 if (imm & ~(BIT(24) - 1))
902 /* If we have something in the top 12 bits... */
903 if (imm & ~(SZ_4K - 1)) {
904 /* ... and in the low 12 bits -> error */
905 if (imm & (SZ_4K - 1))
909 insn |= AARCH64_INSN_LSL_12;
912 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
914 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
916 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
919 pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
920 return AARCH64_BREAK_FAULT;
923 u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
924 enum aarch64_insn_register src,
926 enum aarch64_insn_variant variant,
927 enum aarch64_insn_bitfield_type type)
933 case AARCH64_INSN_BITFIELD_MOVE:
934 insn = aarch64_insn_get_bfm_value();
936 case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
937 insn = aarch64_insn_get_ubfm_value();
939 case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
940 insn = aarch64_insn_get_sbfm_value();
943 pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
944 return AARCH64_BREAK_FAULT;
948 case AARCH64_INSN_VARIANT_32BIT:
949 mask = GENMASK(4, 0);
951 case AARCH64_INSN_VARIANT_64BIT:
952 insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
953 mask = GENMASK(5, 0);
956 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
957 return AARCH64_BREAK_FAULT;
961 pr_err("%s: invalid immr encoding %d\n", __func__, immr);
962 return AARCH64_BREAK_FAULT;
965 pr_err("%s: invalid imms encoding %d\n", __func__, imms);
966 return AARCH64_BREAK_FAULT;
969 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
971 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
973 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
975 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
978 u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
980 enum aarch64_insn_variant variant,
981 enum aarch64_insn_movewide_type type)
986 case AARCH64_INSN_MOVEWIDE_ZERO:
987 insn = aarch64_insn_get_movz_value();
989 case AARCH64_INSN_MOVEWIDE_KEEP:
990 insn = aarch64_insn_get_movk_value();
992 case AARCH64_INSN_MOVEWIDE_INVERSE:
993 insn = aarch64_insn_get_movn_value();
996 pr_err("%s: unknown movewide encoding %d\n", __func__, type);
997 return AARCH64_BREAK_FAULT;
1000 if (imm & ~(SZ_64K - 1)) {
1001 pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
1002 return AARCH64_BREAK_FAULT;
1006 case AARCH64_INSN_VARIANT_32BIT:
1007 if (shift != 0 && shift != 16) {
1008 pr_err("%s: invalid shift encoding %d\n", __func__,
1010 return AARCH64_BREAK_FAULT;
1013 case AARCH64_INSN_VARIANT_64BIT:
1014 insn |= AARCH64_INSN_SF_BIT;
1015 if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
1016 pr_err("%s: invalid shift encoding %d\n", __func__,
1018 return AARCH64_BREAK_FAULT;
1022 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1023 return AARCH64_BREAK_FAULT;
1026 insn |= (shift >> 4) << 21;
1028 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1030 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
1033 u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
1034 enum aarch64_insn_register src,
1035 enum aarch64_insn_register reg,
1037 enum aarch64_insn_variant variant,
1038 enum aarch64_insn_adsb_type type)
1043 case AARCH64_INSN_ADSB_ADD:
1044 insn = aarch64_insn_get_add_value();
1046 case AARCH64_INSN_ADSB_SUB:
1047 insn = aarch64_insn_get_sub_value();
1049 case AARCH64_INSN_ADSB_ADD_SETFLAGS:
1050 insn = aarch64_insn_get_adds_value();
1052 case AARCH64_INSN_ADSB_SUB_SETFLAGS:
1053 insn = aarch64_insn_get_subs_value();
1056 pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
1057 return AARCH64_BREAK_FAULT;
1061 case AARCH64_INSN_VARIANT_32BIT:
1062 if (shift & ~(SZ_32 - 1)) {
1063 pr_err("%s: invalid shift encoding %d\n", __func__,
1065 return AARCH64_BREAK_FAULT;
1068 case AARCH64_INSN_VARIANT_64BIT:
1069 insn |= AARCH64_INSN_SF_BIT;
1070 if (shift & ~(SZ_64 - 1)) {
1071 pr_err("%s: invalid shift encoding %d\n", __func__,
1073 return AARCH64_BREAK_FAULT;
1077 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1078 return AARCH64_BREAK_FAULT;
1082 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1084 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1086 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1088 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1091 u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
1092 enum aarch64_insn_register src,
1093 enum aarch64_insn_variant variant,
1094 enum aarch64_insn_data1_type type)
1099 case AARCH64_INSN_DATA1_REVERSE_16:
1100 insn = aarch64_insn_get_rev16_value();
1102 case AARCH64_INSN_DATA1_REVERSE_32:
1103 insn = aarch64_insn_get_rev32_value();
1105 case AARCH64_INSN_DATA1_REVERSE_64:
1106 if (variant != AARCH64_INSN_VARIANT_64BIT) {
1107 pr_err("%s: invalid variant for reverse64 %d\n",
1109 return AARCH64_BREAK_FAULT;
1111 insn = aarch64_insn_get_rev64_value();
1114 pr_err("%s: unknown data1 encoding %d\n", __func__, type);
1115 return AARCH64_BREAK_FAULT;
1119 case AARCH64_INSN_VARIANT_32BIT:
1121 case AARCH64_INSN_VARIANT_64BIT:
1122 insn |= AARCH64_INSN_SF_BIT;
1125 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1126 return AARCH64_BREAK_FAULT;
1129 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1131 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1134 u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
1135 enum aarch64_insn_register src,
1136 enum aarch64_insn_register reg,
1137 enum aarch64_insn_variant variant,
1138 enum aarch64_insn_data2_type type)
1143 case AARCH64_INSN_DATA2_UDIV:
1144 insn = aarch64_insn_get_udiv_value();
1146 case AARCH64_INSN_DATA2_SDIV:
1147 insn = aarch64_insn_get_sdiv_value();
1149 case AARCH64_INSN_DATA2_LSLV:
1150 insn = aarch64_insn_get_lslv_value();
1152 case AARCH64_INSN_DATA2_LSRV:
1153 insn = aarch64_insn_get_lsrv_value();
1155 case AARCH64_INSN_DATA2_ASRV:
1156 insn = aarch64_insn_get_asrv_value();
1158 case AARCH64_INSN_DATA2_RORV:
1159 insn = aarch64_insn_get_rorv_value();
1162 pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1163 return AARCH64_BREAK_FAULT;
1167 case AARCH64_INSN_VARIANT_32BIT:
1169 case AARCH64_INSN_VARIANT_64BIT:
1170 insn |= AARCH64_INSN_SF_BIT;
1173 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1174 return AARCH64_BREAK_FAULT;
1177 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1179 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1181 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1184 u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1185 enum aarch64_insn_register src,
1186 enum aarch64_insn_register reg1,
1187 enum aarch64_insn_register reg2,
1188 enum aarch64_insn_variant variant,
1189 enum aarch64_insn_data3_type type)
1194 case AARCH64_INSN_DATA3_MADD:
1195 insn = aarch64_insn_get_madd_value();
1197 case AARCH64_INSN_DATA3_MSUB:
1198 insn = aarch64_insn_get_msub_value();
1201 pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1202 return AARCH64_BREAK_FAULT;
1206 case AARCH64_INSN_VARIANT_32BIT:
1208 case AARCH64_INSN_VARIANT_64BIT:
1209 insn |= AARCH64_INSN_SF_BIT;
1212 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1213 return AARCH64_BREAK_FAULT;
1216 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1218 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1220 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1223 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1227 u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1228 enum aarch64_insn_register src,
1229 enum aarch64_insn_register reg,
1231 enum aarch64_insn_variant variant,
1232 enum aarch64_insn_logic_type type)
1237 case AARCH64_INSN_LOGIC_AND:
1238 insn = aarch64_insn_get_and_value();
1240 case AARCH64_INSN_LOGIC_BIC:
1241 insn = aarch64_insn_get_bic_value();
1243 case AARCH64_INSN_LOGIC_ORR:
1244 insn = aarch64_insn_get_orr_value();
1246 case AARCH64_INSN_LOGIC_ORN:
1247 insn = aarch64_insn_get_orn_value();
1249 case AARCH64_INSN_LOGIC_EOR:
1250 insn = aarch64_insn_get_eor_value();
1252 case AARCH64_INSN_LOGIC_EON:
1253 insn = aarch64_insn_get_eon_value();
1255 case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1256 insn = aarch64_insn_get_ands_value();
1258 case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1259 insn = aarch64_insn_get_bics_value();
1262 pr_err("%s: unknown logical encoding %d\n", __func__, type);
1263 return AARCH64_BREAK_FAULT;
1267 case AARCH64_INSN_VARIANT_32BIT:
1268 if (shift & ~(SZ_32 - 1)) {
1269 pr_err("%s: invalid shift encoding %d\n", __func__,
1271 return AARCH64_BREAK_FAULT;
1274 case AARCH64_INSN_VARIANT_64BIT:
1275 insn |= AARCH64_INSN_SF_BIT;
1276 if (shift & ~(SZ_64 - 1)) {
1277 pr_err("%s: invalid shift encoding %d\n", __func__,
1279 return AARCH64_BREAK_FAULT;
1283 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1284 return AARCH64_BREAK_FAULT;
1288 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1290 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1292 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1294 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1298 * MOV (register) is architecturally an alias of ORR (shifted register) where
1299 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
1301 u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
1302 enum aarch64_insn_register src,
1303 enum aarch64_insn_variant variant)
1305 return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
1307 AARCH64_INSN_LOGIC_ORR);
1310 u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
1311 enum aarch64_insn_register reg,
1312 enum aarch64_insn_adr_type type)
1318 case AARCH64_INSN_ADR_TYPE_ADR:
1319 insn = aarch64_insn_get_adr_value();
1322 case AARCH64_INSN_ADR_TYPE_ADRP:
1323 insn = aarch64_insn_get_adrp_value();
1324 offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
1327 pr_err("%s: unknown adr encoding %d\n", __func__, type);
1328 return AARCH64_BREAK_FAULT;
1331 if (offset < -SZ_1M || offset >= SZ_1M)
1332 return AARCH64_BREAK_FAULT;
1334 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
1336 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
1340 * Decode the imm field of a branch, and return the byte offset as a
1341 * signed value (so it can be used when computing a new branch
1344 s32 aarch64_get_branch_offset(u32 insn)
1348 if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
1349 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
1350 return (imm << 6) >> 4;
1353 if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1354 aarch64_insn_is_bcond(insn)) {
1355 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
1356 return (imm << 13) >> 11;
1359 if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
1360 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
1361 return (imm << 18) >> 16;
1364 /* Unhandled instruction */
1369 * Encode the displacement of a branch in the imm field and return the
1370 * updated instruction.
1372 u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1374 if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1375 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1378 if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1379 aarch64_insn_is_bcond(insn))
1380 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1383 if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1384 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1387 /* Unhandled instruction */
1391 s32 aarch64_insn_adrp_get_offset(u32 insn)
1393 BUG_ON(!aarch64_insn_is_adrp(insn));
1394 return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
1397 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
1399 BUG_ON(!aarch64_insn_is_adrp(insn));
1400 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
1405 * Extract the Op/CR data from a msr/mrs instruction.
1407 u32 aarch64_insn_extract_system_reg(u32 insn)
1409 return (insn & 0x1FFFE0) >> 5;
1412 bool aarch32_insn_is_wide(u32 insn)
1414 return insn >= 0xe800;
1418 * Macros/defines for extracting register numbers from instruction.
1420 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1422 return (insn & (0xf << offset)) >> offset;
1425 #define OPC2_MASK 0x7
1426 #define OPC2_OFFSET 5
1427 u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1429 return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1432 #define CRM_MASK 0xf
1433 u32 aarch32_insn_mcr_extract_crm(u32 insn)
1435 return insn & CRM_MASK;
1438 static bool __kprobes __check_eq(unsigned long pstate)
1440 return (pstate & PSR_Z_BIT) != 0;
1443 static bool __kprobes __check_ne(unsigned long pstate)
1445 return (pstate & PSR_Z_BIT) == 0;
1448 static bool __kprobes __check_cs(unsigned long pstate)
1450 return (pstate & PSR_C_BIT) != 0;
1453 static bool __kprobes __check_cc(unsigned long pstate)
1455 return (pstate & PSR_C_BIT) == 0;
1458 static bool __kprobes __check_mi(unsigned long pstate)
1460 return (pstate & PSR_N_BIT) != 0;
1463 static bool __kprobes __check_pl(unsigned long pstate)
1465 return (pstate & PSR_N_BIT) == 0;
1468 static bool __kprobes __check_vs(unsigned long pstate)
1470 return (pstate & PSR_V_BIT) != 0;
1473 static bool __kprobes __check_vc(unsigned long pstate)
1475 return (pstate & PSR_V_BIT) == 0;
1478 static bool __kprobes __check_hi(unsigned long pstate)
1480 pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
1481 return (pstate & PSR_C_BIT) != 0;
1484 static bool __kprobes __check_ls(unsigned long pstate)
1486 pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
1487 return (pstate & PSR_C_BIT) == 0;
1490 static bool __kprobes __check_ge(unsigned long pstate)
1492 pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1493 return (pstate & PSR_N_BIT) == 0;
1496 static bool __kprobes __check_lt(unsigned long pstate)
1498 pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1499 return (pstate & PSR_N_BIT) != 0;
1502 static bool __kprobes __check_gt(unsigned long pstate)
1504 /*PSR_N_BIT ^= PSR_V_BIT */
1505 unsigned long temp = pstate ^ (pstate << 3);
1507 temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
1508 return (temp & PSR_N_BIT) == 0;
1511 static bool __kprobes __check_le(unsigned long pstate)
1513 /*PSR_N_BIT ^= PSR_V_BIT */
1514 unsigned long temp = pstate ^ (pstate << 3);
1516 temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
1517 return (temp & PSR_N_BIT) != 0;
1520 static bool __kprobes __check_al(unsigned long pstate)
1526 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
1527 * it behaves identically to 0b1110 ("al").
1529 pstate_check_t * const aarch32_opcode_cond_checks[16] = {
1530 __check_eq, __check_ne, __check_cs, __check_cc,
1531 __check_mi, __check_pl, __check_vs, __check_vc,
1532 __check_hi, __check_ls, __check_ge, __check_lt,
1533 __check_gt, __check_le, __check_al, __check_al
1536 static bool range_of_ones(u64 val)
1538 /* Doesn't handle full ones or full zeroes */
1539 u64 sval = val >> __ffs64(val);
1541 /* One of Sean Eron Anderson's bithack tricks */
1542 return ((sval + 1) & (sval)) == 0;
1545 static u32 aarch64_encode_immediate(u64 imm,
1546 enum aarch64_insn_variant variant,
1549 unsigned int immr, imms, n, ones, ror, esz, tmp;
1553 case AARCH64_INSN_VARIANT_32BIT:
1556 case AARCH64_INSN_VARIANT_64BIT:
1557 insn |= AARCH64_INSN_SF_BIT;
1561 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1562 return AARCH64_BREAK_FAULT;
1565 mask = GENMASK(esz - 1, 0);
1567 /* Can't encode full zeroes, full ones, or value wider than the mask */
1568 if (!imm || imm == mask || imm & ~mask)
1569 return AARCH64_BREAK_FAULT;
1572 * Inverse of Replicate(). Try to spot a repeating pattern
1573 * with a pow2 stride.
1575 for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
1576 u64 emask = BIT(tmp) - 1;
1578 if ((imm & emask) != ((imm >> tmp) & emask))
1585 /* N is only set if we're encoding a 64bit value */
1588 /* Trim imm to the element size */
1591 /* That's how many ones we need to encode */
1592 ones = hweight64(imm);
1595 * imms is set to (ones - 1), prefixed with a string of ones
1596 * and a zero if they fit. Cap it to 6 bits.
1599 imms |= 0xf << ffs(esz);
1602 /* Compute the rotation */
1603 if (range_of_ones(imm)) {
1605 * Pattern: 0..01..10..0
1607 * Compute how many rotate we need to align it right
1612 * Pattern: 0..01..10..01..1
1614 * Fill the unused top bits with ones, and check if
1615 * the result is a valid immediate (all ones with a
1616 * contiguous ranges of zeroes).
1619 if (!range_of_ones(~imm))
1620 return AARCH64_BREAK_FAULT;
1623 * Compute the rotation to get a continuous set of
1624 * ones, with the first bit set at position 0
1630 * immr is the number of bits we need to rotate back to the
1631 * original set of ones. Note that this is relative to the
1634 immr = (esz - ror) % esz;
1636 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
1637 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
1638 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
1641 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1642 enum aarch64_insn_variant variant,
1643 enum aarch64_insn_register Rn,
1644 enum aarch64_insn_register Rd,
1650 case AARCH64_INSN_LOGIC_AND:
1651 insn = aarch64_insn_get_and_imm_value();
1653 case AARCH64_INSN_LOGIC_ORR:
1654 insn = aarch64_insn_get_orr_imm_value();
1656 case AARCH64_INSN_LOGIC_EOR:
1657 insn = aarch64_insn_get_eor_imm_value();
1659 case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1660 insn = aarch64_insn_get_ands_imm_value();
1663 pr_err("%s: unknown logical encoding %d\n", __func__, type);
1664 return AARCH64_BREAK_FAULT;
1667 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1668 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1669 return aarch64_encode_immediate(imm, variant, insn);
1672 u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1673 enum aarch64_insn_register Rm,
1674 enum aarch64_insn_register Rn,
1675 enum aarch64_insn_register Rd,
1680 insn = aarch64_insn_get_extr_value();
1683 case AARCH64_INSN_VARIANT_32BIT:
1685 return AARCH64_BREAK_FAULT;
1687 case AARCH64_INSN_VARIANT_64BIT:
1689 return AARCH64_BREAK_FAULT;
1690 insn |= AARCH64_INSN_SF_BIT;
1691 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1694 pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1695 return AARCH64_BREAK_FAULT;
1698 insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1699 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1700 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1701 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);