/* SPDX-License-Identifier: GPL-2.0-only */
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
#error "Only include this from assembly code"
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
#include <linux/export.h>
#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
 * Provide a wxN alias for each wN register so that we can paste an xN
 * reference after a 'w' to obtain the 32-bit version.
	.irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
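/*
 * Illustrative sketch (macro and register names are hypothetical): inside a
 * macro that receives an x register as \xreg, writing "w\xreg" expands to
 * e.g. "wx5", which the alias above resolves to w5, the 32-bit view of x5.
 * Invoking "example_set_lower32 x5" below would therefore write 1 into w5:
 *
 *	.macro	example_set_lower32, xreg
 *	mov	w\xreg, #1
 *	.endm
 */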
 * Save/restore interrupts.
	.macro save_and_disable_irq, flags
	.macro restore_irq, flags
	.macro disable_step_tsk, flgs, tmp
	tbz \flgs, #TIF_SINGLESTEP, 9990f
	bic \tmp, \tmp, #DBG_MDSCR_SS
	isb // Synchronise with enable_dbg
/* call with daif masked */
	.macro enable_step_tsk, flgs, tmp
	tbz \flgs, #TIF_SINGLESTEP, 9990f
	orr \tmp, \tmp, #DBG_MDSCR_SS
 * RAS Error Synchronization barrier
#ifdef CONFIG_ARM64_RAS_EXTN
 * Value prediction barrier
 * Clear Branch History instruction
 * Speculation barrier
alternative_if_not ARM64_HAS_SB
lr .req x30 // link register
 * Select code when configured for BE.
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#define CPU_BE(code...)
 * Select code when configured for LE.
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#define CPU_LE(code...) code
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro regs_to_64, rd, lbits, hbits
	.macro regs_to_64, rd, hbits, lbits
	orr \rd, \lbits, \hbits, lsl #32
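/*
 * Usage sketch (registers illustrative): assuming x1 and x2 each hold a
 * zero-extended 32-bit value, "regs_to_64 x0, x1, x2" produces
 * x0 = (x2 << 32) | x1 on a little-endian kernel; on a big-endian kernel the
 * parameter order is swapped so callers can pass the halves in the same
 * textual order.
 */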
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 * @dst: destination register (64 bit wide)
 * @sym: name of the symbol
	.macro adr_l, dst, sym
	add \dst, \dst, :lo12:\sym
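/*
 * Usage sketch (symbol name illustrative): "adr_l x0, some_sym" materialises
 * the runtime address of some_sym in x0 from an adrp/add pair, with no
 * literal pool and no range restriction beyond +/- 4 GB of the PC.
 */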
 * @dst: destination register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: optional 64-bit scratch register to be used if <dst> is a
 * 32-bit wide register, in which case it cannot be used to hold
	.macro ldr_l, dst, sym, tmp=
	ldr \dst, [\dst, :lo12:\sym]
	ldr \dst, [\tmp, :lo12:\sym]
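/*
 * Usage sketch (symbol name illustrative): "ldr_l x0, some_var" loads the
 * value of some_var into x0; with a 32-bit destination a 64-bit scratch
 * register must be supplied to form the address, e.g. "ldr_l w0, some_var, x1".
 */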
 * @src: source register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: mandatory 64-bit scratch register to calculate the address
 * while <src> needs to be preserved.
	.macro str_l, src, sym, tmp
	str \src, [\tmp, :lo12:\sym]
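/*
 * Usage sketch (symbol name illustrative): "str_l x0, some_var, x1" stores x0
 * to some_var, using x1 only for the address computation so that x0 is left
 * untouched.
 */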
 * @dst: destination register
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro get_this_cpu_offset, dst
	.macro get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	.macro set_this_cpu_offset, src
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
 * @sym: The name of the per-cpu variable
 * @tmp: scratch register
	.macro adr_this_cpu, dst, sym, tmp
	add \dst, \tmp, #:lo12:\sym
	get_this_cpu_offset \tmp
 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
 * @sym: The name of the per-cpu variable
 * @tmp: scratch register
	.macro ldr_this_cpu dst, sym, tmp
	get_this_cpu_offset \tmp
	ldr \dst, [\dst, \tmp]
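/*
 * Usage sketch (names illustrative): "ldr_this_cpu x0, some_percpu_var, x1"
 * reads this CPU's copy of a per-cpu variable by adding the offset returned
 * by get_this_cpu_offset to the variable's link-time address.
 */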
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
	.macro vma_vm_mm, rd, rn
	ldr \rd, [\rn, #VMA_VM_MM]
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs \reg, ctr_el0 // read CTR
	ldr_l \reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_if_not ARM64_KVM_PROTECTED_MODE
alternative_else_nop_endif
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
	movk \reg, #0, lsl #16
	movk \reg, #0, lsl #32
	movk \reg, #0, lsl #48
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
	.macro raw_dcache_line_size, reg, tmp
	mrs \tmp, ctr_el0 // read CTR
	ubfm \tmp, \tmp, #16, #19 // cache line size encoding
	mov \reg, #4 // bytes per word
	lsl \reg, \reg, \tmp // actual cache line size
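/*
 * Worked example: CTR_EL0.DminLine (bits [19:16]) encodes log2 of the line
 * size in 4-byte words, so a field value of 4 yields 4 << 4 = 64 bytes.
 */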
 * dcache_line_size - get the safe D-cache line size across all CPUs
	.macro dcache_line_size, reg, tmp
	ubfm \tmp, \tmp, #16, #19 // cache line size encoding
	mov \reg, #4 // bytes per word
	lsl \reg, \reg, \tmp // actual cache line size
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
	.macro raw_icache_line_size, reg, tmp
	mrs \tmp, ctr_el0 // read CTR
	and \tmp, \tmp, #0xf // cache line size encoding
	mov \reg, #4 // bytes per word
	lsl \reg, \reg, \tmp // actual cache line size
 * icache_line_size - get the safe I-cache line size across all CPUs
	.macro icache_line_size, reg, tmp
	and \tmp, \tmp, #0xf // cache line size encoding
	mov \reg, #4 // bytes per word
	lsl \reg, \reg, \tmp // actual cache line size
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
	.macro tcr_set_t0sz, valreg, t0sz
	bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
 * tcr_set_t1sz - update TCR.T1SZ
	.macro tcr_set_t1sz, valreg, t1sz
	bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 * tcr: register with the TCR_ELx value to be updated
 * pos: IPS or PS bitfield position
 * tmp{0,1}: temporary registers
	.macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs \tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
	mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
	csel \tmp0, \tmp1, \tmp0, hi
	bfi \tcr, \tmp0, \pos, #3
	.macro __dcache_op_workaround_clean_cache, op, addr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
 * Macro to perform a data cache maintenance for the interval
 * [start, end) with dcache line size explicitly provided.
 * op: operation passed to dc instruction
 * domain: domain used in dsb instruction
 * start: starting virtual address of the region
 * end: end virtual address of the region
 * linesz: dcache line size
 * fixup: optional label to branch to on user fault
 * Corrupts: start, end, tmp
	.macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
	sub \tmp, \linesz, #1
	bic \start, \start, \tmp
	__dcache_op_workaround_clean_cache \op, \start
	__dcache_op_workaround_clean_cache \op, \start
	sys 3, c7, c12, 1, \start // dc cvap
	sys 3, c7, c13, 1, \start // dc cvadp
	add \start, \start, \linesz
	_cond_uaccess_extable .Ldcache_op\@, \fixup
 * Macro to perform a data cache maintenance for the interval
 * op: operation passed to dc instruction
 * domain: domain used in dsb instruction
 * start: starting virtual address of the region
 * end: end virtual address of the region
 * fixup: optional label to branch to on user fault
 * Corrupts: start, end, tmp1, tmp2
	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
	dcache_line_size \tmp1, \tmp2
	dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
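/*
 * Usage sketch (register choice illustrative): cleaning and invalidating a
 * buffer to the PoC could look like
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 *
 * where x0/x1 hold the start/end addresses (and are corrupted) and x2/x3 are
 * scratch registers.
 */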
 * Macro to perform an instruction cache maintenance for the interval
 * start, end: virtual addresses describing the region
 * fixup: optional label to branch to on user fault
 * Corrupts: tmp1, tmp2
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
	icache_line_size \tmp1, \tmp2
	bic \tmp2, \start, \tmp2
	ic ivau, \tmp2 // invalidate I line PoU
	add \tmp2, \tmp2, \tmp1
	_cond_uaccess_extable .Licache_op\@, \fixup
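/*
 * Usage sketch (registers illustrative): "invalidate_icache_by_line x0, x1,
 * x2, x3" invalidates the I-cache lines covering [x0, x1); a fixup label can
 * be supplied when the range may fault on user addresses.
 */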
 * load_ttbr1 - install @pgtbl as a TTBR1 page table
 * tmp1/tmp2 clobbered, either may overlap with pgtbl
	.macro load_ttbr1, pgtbl, tmp1, tmp2
	phys_to_ttbr \tmp1, \pgtbl
	offset_ttbr1 \tmp1, \tmp2
 * To prevent the possibility of old and new partial table walks being visible
 * in the tlb, switch the ttbr to a zero page when we invalidate the old
 * records (D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i).
 * Even switching to our copied tables will cause a changed output address at
 * each stage of the walk.
	.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
	phys_to_ttbr \tmp, \zero_page
	load_ttbr1 \page_table, \tmp, \tmp2
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
	.macro reset_pmuserenr_el0, tmpreg
	mrs \tmpreg, id_aa64dfr0_el1
	sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp \tmpreg, #1 // Skip if no PMU present
	msr pmuserenr_el0, xzr // Disable PMU access from EL0
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
	.macro reset_amuserenr_el0, tmpreg
	mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1
	ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz \tmpreg, .Lskip_\@ // Skip if no AMU present
	msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0
 * copy_page - copy src to dest using temp registers t1-t8
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp \t1, \t2, [\src]
	ldp \t3, \t4, [\src, #16]
	ldp \t5, \t6, [\src, #32]
	ldp \t7, \t8, [\src, #48]
	stnp \t1, \t2, [\dest]
	stnp \t3, \t4, [\dest, #16]
	stnp \t5, \t6, [\dest, #32]
	stnp \t7, \t8, [\dest, #48]
	add \dest, \dest, #64
	tst \src, #(PAGE_SIZE - 1)
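/*
 * Usage sketch (register choice illustrative):
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 *
 * copies PAGE_SIZE bytes from the page at x1 to the page at x0 in 64-byte
 * chunks using non-temporal stores; x2-x9 are corrupted as temporaries.
 */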
 * Annotate a function as being unsuitable for kprobes.
#ifdef CONFIG_KPROBES
#define NOKPROBE(x) \
	.pushsection "_kprobe_blacklist", "aw"; \
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 * mov_q - move an immediate constant into a 64-bit register using
 * between 2 and 4 movz/movk instructions (depending on the
 * magnitude and sign of the operand)
	.macro mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz \reg, :abs_g1_s:\val
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz \reg, :abs_g2_s:\val
	movz \reg, :abs_g3:\val
	movk \reg, :abs_g2_nc:\val
	movk \reg, :abs_g1_nc:\val
	movk \reg, :abs_g0_nc:\val
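/*
 * Worked example (value illustrative): "mov_q x0, 0x12345678" fits in a
 * sign-extended 32-bit value and so emits only movz (group g1) plus movk
 * (group g0); a constant with bits set above bit 47 expands to the full
 * movz + three movk sequence.
 */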
 * Return the current task_struct.
	.macro get_current_task, rd
 * If the kernel is built for 52-bit virtual addressing but the hardware only
 * supports 48 bits, we cannot program the pgdir address into TTBR1 directly,
 * but we have to add an offset so that the TTBR1 address corresponds with the
 * pgdir entry that covers the lowest 48-bit addressable VA.
 * orr is used as it can cover the immediate value (and is idempotent).
 * ttbr: Value of ttbr to set, modified.
	.macro offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	and \tmp, \tmp, #TCR_T1SZ_MASK
	cmp \tmp, #TCR_T1SZ(VA_BITS_MIN)
	orr \tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
	csel \ttbr, \tmp, \ttbr, eq
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * phys: physical address, preserved
 * ttbr: returns the TTBR value
	.macro phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr \ttbr, \phys, \phys, lsr #46
	and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
	.macro phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
 * We assume \phys is 64K aligned and this is guaranteed by only
 * supporting this configuration with 64K pages.
	orr \pte, \phys, \phys, lsr #36
	and \pte, \pte, #PTE_ADDR_MASK
	.macro pte_to_phys, phys, pte
	and \phys, \pte, #PTE_ADDR_MASK
#ifdef CONFIG_ARM64_PA_BITS_52
	orr \phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
	and \phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
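/*
 * Worked example (assuming the 64K-page, 52-bit PA layout above): physical
 * address bits [51:48] are kept in pte bits [15:12], so OR-ing the pte with
 * itself shifted left by PTE_ADDR_HIGH_SHIFT moves those bits back up to
 * [51:48] before the final mask clears the low-order page-offset bits again.
 */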
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
	.macro tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and \tmp1, \tmp1, \tmp2
	mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001
	mov_q \tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic \tcr, \tcr, \tmp2
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately prior
 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
 * frame_push - Push @regcount callee saved registers to the stack,
 * starting at x19, as well as x29/x30, and set x29 to
 * the new value of sp. Add @extra bytes of stack space
	.macro frame_push, regcount:req, extra
	__frame st, \regcount, \extra
 * frame_pop - Pop the callee saved registers from the stack that were
 * pushed in the most recent call to frame_push, as well
 * as x29/x30 and any extra stack space that may have been
	.macro __frame_regs, reg1, reg2, op, num
	.if .Lframe_regcount == \num
	\op\()r \reg1, [sp, #(\num + 1) * 8]
	.elseif .Lframe_regcount > \num
	\op\()p \reg1, \reg2, [sp, #(\num + 1) * 8]
	.macro __frame, op, regcount, extra=0
	.if (\regcount) < 0 || (\regcount) > 10
	.error "regcount should be in the range [0 ... 10]"
	.if ((\extra) % 16) != 0
	.error "extra should be a multiple of 16 bytes"
	.ifdef .Lframe_regcount
	.if .Lframe_regcount != -1
	.error "frame_push/frame_pop may not be nested"
	.set .Lframe_regcount, \regcount
	.set .Lframe_extra, \extra
	.set .Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	__frame_regs x19, x20, \op, 1
	__frame_regs x21, x22, \op, 3
	__frame_regs x23, x24, \op, 5
	__frame_regs x25, x26, \op, 7
	__frame_regs x27, x28, \op, 9
	.if .Lframe_regcount == -1
	.error "frame_push/frame_pop may not be nested"
	ldp x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set .Lframe_regcount, -1
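/*
 * Usage sketch (counts illustrative): a routine that needs x19/x20 plus 16
 * bytes of scratch could bracket its body with
 *
 *	frame_push 2, 16
 *	...
 *	frame_pop
 *
 * which saves x29/x30 and x19/x20, points x29 at the new frame record and
 * reserves the extra space; frame_push/frame_pop pairs may not be nested.
 */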
 * Set SCTLR_ELx to the @reg value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
	.macro set_sctlr, sreg, reg
 * Invalidate the local I-cache so that any instructions fetched
 * speculatively from the PoC are discarded, since they may have
 * been dynamically patched at the PoU.
	.macro set_sctlr_el1, reg
	set_sctlr sctlr_el1, \reg
	.macro set_sctlr_el2, reg
	set_sctlr sctlr_el2, \reg
 * Check whether asm code should yield as soon as it is able. This is
 * the case if we are currently running in task context, and the
 * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
 * is stored negated in the top word of the thread_info::preempt_count
	.macro cond_yield, lbl:req, tmp:req, tmp2
#ifdef CONFIG_PREEMPT_VOLUNTARY
	get_current_task \tmp
	ldr \tmp, [\tmp, #TSK_TI_PREEMPT]
 * If we are serving a softirq, there is no point in yielding: the
 * softirq will not be preempted no matter what we do, so we should
 * run to completion as quickly as we can. The preempt_count field will
 * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
 * catch this case too.
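/*
 * Usage sketch (label and scratch registers illustrative): a long-running
 * assembly loop in task context can insert "cond_yield 3f, x8, x9" so that
 * control branches to local label 3 when a reschedule is due and falls
 * through otherwise.
 */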
 * Branch Target Identifier (BTI)
	.equ .L__bti_targets_c, 34
	.equ .L__bti_targets_j, 36
	.equ .L__bti_targets_jc, 38
	hint #.L__bti_targets_\targets
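/*
 * Usage sketch: assembly entry points that may be reached via an indirect
 * branch typically start with "bti c", which expands to the corresponding
 * HINT instruction (a NOP on CPUs without BTI).
 */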
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
#define NT_GNU_PROPERTY_TYPE_0 5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1U << 1)
#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT \
	((GNU_PROPERTY_AARCH64_FEATURE_1_BTI | \
	  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.long NT_GNU_PROPERTY_TYPE_0
3:	.long GNU_PROPERTY_AARCH64_FEATURE_1_AND
 * This is described with an array of char in the Linux API
 * spec but the text and all other usage (including binutils,
 * clang and GCC) treat this as a 32 bit value so no swizzling
 * is required for big endian.
	.macro emit_aarch64_feature_1_and, feat=0
#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
	.macro __mitigate_spectre_bhb_loop tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
	mov \tmp, #32 // Patched to correct the immediate
.Lspectre_bhb_loop\@:
	b.ne .Lspectre_bhb_loop\@
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.macro mitigate_spectre_bhb_loop tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
	b .L_spectre_bhb_loop_done\@ // Patched to NOP
	__mitigate_spectre_bhb_loop \tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
/* Saves/restores x0-x3 to the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp x0, x1, [sp, #-16]!
	stp x2, x3, [sp, #-16]!
	mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop // Patched to SMC/HVC #0
	ldp x2, x3, [sp], #16
	ldp x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
	/* Patched to NOP when not supported */
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
#endif /* __ASM_ASSEMBLER_H */