/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
	/*
	 * Provide a wxN alias for each wN register so that we can paste a xN
	 * reference after a 'w' to obtain the 32-bit version.
	 */
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	wx\n	.req	w\n
	.endr
	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm
	.macro restore_daif, flags:req
	msr	daif, \flags
	.endm

	/* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
	.macro enable_da
	msr	daifclr, #(8 | 4)
	.endm
/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #3
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm
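	/*
	 * Usage sketch (illustrative only, not from the kernel sources):
	 * briefly mask interrupts around a critical sequence, with x2 as
	 * the scratch register that receives the old DAIF flags.
	 *
	 *	save_and_disable_irq x2
	 *	// ... code that must not be interrupted ...
	 *	restore_irq x2
	 */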
	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm
	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm
/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint	#16
#else
	nop
#endif
	.endm
/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm
/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm
/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm
#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
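	/*
	 * Usage sketch (illustrative only; the label and registers are
	 * hypothetical): perform an unprivileged store and divert to a
	 * local fixup label if it faults, via the exception table entry.
	 *
	 * USER(9f, sttr	w1, [x0])	// falls through on success
	 *	...
	 * 9:	// fault fixup runs here
	 */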
lr	.req	x30		// link register
/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif
/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
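	/*
	 * Usage sketch (illustrative only): combine a low word in x0 and a
	 * high word in x1 into x2. This assumes the upper 32 bits of both
	 * sources are zero, as after a 32-bit register write.
	 *
	 *	regs_to_64 x2, x0, x1
	 */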
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm
	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm
	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
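	/*
	 * Usage sketch (illustrative only; my_var is a hypothetical symbol):
	 *
	 *	adr_l	x0, my_var		// x0 = &my_var
	 *	ldr_l	w1, my_var, x2		// w1 = my_var, via scratch x2
	 *	str_l	w1, my_var, x2		// my_var = w1, via scratch x2
	 */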
	/*
	 * @dst: destination register
	 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro	this_cpu_offset, dst
	mrs	\dst, tpidr_el2
	.endm
#else
	.macro	this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\dst, tpidr_el1
alternative_else
	mrs	\dst, tpidr_el2
alternative_endif
	.endm
#endif
	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	this_cpu_offset \tmp
	add	\dst, \dst, \tmp
	.endm
	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	this_cpu_offset \tmp
	ldr	\dst, [\dst, \tmp]
	.endm
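	/*
	 * Usage sketch (illustrative only; my_pcpu_var is hypothetical):
	 *
	 *	adr_this_cpu x0, my_pcpu_var, x1  // x0 = this CPU's &my_pcpu_var
	 *	ldr_this_cpu x0, my_pcpu_var, x1  // x0 = this CPU's my_pcpu_var
	 */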
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm
/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
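	/*
	 * Worked example (illustrative): a CTR_EL0.DminLine encoding of 4
	 * means 2^4 = 16 words per line, and the macro computes
	 * 4 << 4 = 64 bytes, the common line size.
	 *
	 *	dcache_line_size x2, x3		// x2 = line size in bytes
	 */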
/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm
/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm
/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm
/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
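	/*
	 * Usage sketch (illustrative; mirrors how MMU setup code clamps the
	 * IPS field, with hypothetical register choices):
	 *
	 *	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
	 */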
/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
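	/*
	 * Usage sketch (illustrative only): clean and invalidate the region
	 * [x0, x0 + x1) to the point of coherency, with x2/x3 as scratch.
	 *
	 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
	 */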
/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 *	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 *	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm
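	/*
	 * Usage sketch (illustrative only; 9f is a hypothetical local label
	 * that handles a fault on a user address):
	 *
	 *	invalidate_icache_by_line x0, x1, x2, x3, 9f
	 */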
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f				// Skip if no PMU present
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm
/*
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
 */
	.macro	reset_amuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
.Lskip_\@:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
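	/*
	 * Usage sketch (illustrative only): copy one page from the address
	 * in x1 to the address in x0, clobbering x2-x9. Both pointers are
	 * assumed page aligned, since the loop terminates on page alignment
	 * of \src.
	 *
	 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
	 */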
/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif
	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm
	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
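	/*
	 * Usage sketch (illustrative only): load a 64-bit constant that a
	 * single mov cannot encode; this expands to the minimal movz/movk
	 * sequence for the value.
	 *
	 *	mov_q	x0, 0x0123456789abcdef
	 */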
/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm
/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 *	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@:
#endif
	.endm
/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm
/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *	phys:	physical address, preserved
 *	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm
	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm
	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm
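	/*
	 * Usage sketch (illustrative only): build a TTBR-formatted value in
	 * x0 from the physical address of a pgd held in x1, which is left
	 * unmodified.
	 *
	 *	phys_to_ttbr x0, x1
	 */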
/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm
/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm
	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm
	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm
	.macro		__frame_regs, reg1, reg2, op, num
	.if .Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif .Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm
	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if (\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if ((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if .Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
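	/*
	 * Usage sketch (illustrative only): save x19-x22 (plus x29/x30),
	 * set up the frame pointer and reserve 16 extra bytes of stack for
	 * locals; frame_pop undoes all of it before returning.
	 *
	 *	frame_push	4, 16
	 *	// ... code clobbering x19-x22 ...
	 *	frame_pop
	 *	ret
	 */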
/*
 * Set SCTLR_EL1 to the passed value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 */
.macro set_sctlr_el1, reg
	msr	sctlr_el1, \reg
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
.endm
	/*
	 * Check whether preempt/bh-disabled asm code should yield as soon as
	 * it is able. This is the case if we are currently running in task
	 * context, and either a softirq is pending, or the TIF_NEED_RESCHED
	 * flag is set and re-enabling preemption a single time would result in
	 * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
	 * stored negated in the top word of the thread_info::preempt_count
	 * field)
	 */
	.macro		cond_yield, lbl:req, tmp:req, tmp2:req
	get_current_task \tmp
	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
	/*
	 * If we are serving a softirq, there is no point in yielding: the
	 * softirq will not be preempted no matter what we do, so we should
	 * run to completion as quickly as we can.
	 */
	tbnz		\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
#ifdef CONFIG_PREEMPTION
	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
	cbz		\tmp, \lbl
#endif
	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
	this_cpu_offset	\tmp2
	ldr		w\tmp, [\tmp, \tmp2]
	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
.Lnoyield_\@:
	.endm
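	/*
	 * Usage sketch (illustrative only, in the style of long-running
	 * crypto loops): branch to a local label that saves state and
	 * yields when a reschedule is due. x8/x9 are hypothetical scratch
	 * choices; the w\tmp access relies on the wxN aliases above.
	 *
	 *	cond_yield	3f, x8, x9
	 *	// ... keep processing ...
	 * 3:	// save state, yield, then resume
	 */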
/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */

#define NT_GNU_PROPERTY_TYPE_0			5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND	0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI	(1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC	(1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif
#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.align	3
	.long	2f - 1f
	.long	6f - 3f
	.long	NT_GNU_PROPERTY_TYPE_0
1:	.string	"GNU"
2:
	.align	3
3:	.long	GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long	5f - 4f
4:
	/*
	 * This is described with an array of char in the Linux API
	 * spec but the text and all other usage (including binutils,
	 * clang and GCC) treat this as a 32 bit value so no swizzling
	 * is required for big endian.
	 */
	.long	\feat
5:
	.align	3
6:
	.popsection
.endm

#else
.macro emit_aarch64_feature_1_and, feat=0
.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */

#endif	/* __ASM_ASSEMBLER_H */