4 # This file is included by the global makefile so that you can add your own
5 # architecture-specific flags and dependencies.
7 # This file is subject to the terms and conditions of the GNU General Public
8 # License. See the file "COPYING" in the main directory of this archive
11 # Copyright (C) 1995-2001 by Russell King
# Base linker flags for the final vmlinux link: fail on unresolved symbols
# (--no-undefined) and discard local symbols (-X).
13 LDFLAGS_vmlinux :=--no-undefined -X
# When the kernel is relocatable, vmlinux is linked as a PIE-style shared
# object so the boot code can relocate it at runtime.
# NOTE(review): the matching endif for this ifeq is not visible in this chunk.
15 ifeq ($(CONFIG_RELOCATABLE), y)
16 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
17 # for relative relocs, since this leads to better Image compression
18 # with the relocation offsets always being zero.
19 LDFLAGS_vmlinux += -shared -Bsymbolic -z notext \
20 $(call ld-option, --no-apply-dynamic-relocs)
# Cortex-A53 erratum 843419: if the workaround is enabled and the linker
# supports the fix, let ld rewrite the affected ADRP sequences.
# NOTE(review): the endif lines closing these two ifeqs are not visible here.
23 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
24 ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
25 LDFLAGS_vmlinux += --fix-cortex-a53-843419
# Compile-time probe: feed a tiny C program using the AArch64 "K" inline-asm
# constraint to $(CC); if it compiles, define CONFIG_CC_HAS_K_CONSTRAINT=1
# (try-run's second argument — on failure — is empty).
# NOTE(review): the echoed C program is partially elided in this chunk; only
# the asm statement and closing brace are visible.
29 cc_has_k_constraint := $(call try-run,echo \
31 asm volatile("and w0, w0, %w0" :: "K" (4294967295)); \
33 }' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
# Warn at parse time when the assembler is known to emit broken .inst
# encodings (affects disassembly only, not correctness of the build).
# NOTE(review): the endif for this ifeq is not visible in this chunk.
35 ifeq ($(CONFIG_BROKEN_GAS_INST),y)
36 $(warning Detected assembler with broken .inst; disassembly will be unreliable)
# Core compiler flags: restrict the kernel to general-purpose registers
# (no FP/SIMD), plus the compat-vdso and K-constraint probe results above.
39 KBUILD_CFLAGS += -mgeneral-regs-only \
40 $(compat_vdso) $(cc_has_k_constraint)
41 KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
42 KBUILD_AFLAGS += $(compat_vdso)
# Force the LP64 ABI where the compiler accepts the option.
44 KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
45 KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
47 # Avoid generating .eh_frame* sections.
48 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
49 KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
# Per-task stack protector: the canary lives in the task struct, addressed
# via sp_el0. The guard offset must come from the generated asm-offsets.h,
# so the flags are added from a hook that runs after prepare0 (which builds
# asm-offsets.h) but before the rest of prepare.
# NOTE(review): the endif for this ifeq is not visible in this chunk.
51 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
52 prepare: stack_protector_prepare
53 stack_protector_prepare: prepare0
# $(eval ...) in the recipe appends to KBUILD_CFLAGS at build time; the
# awk invocation extracts TSK_STACK_CANARY's value from asm-offsets.h.
54 $(eval KBUILD_CFLAGS += -mstack-protector-guard=sysreg \
55 -mstack-protector-guard-reg=sp_el0 \
56 -mstack-protector-guard-offset=$(shell \
57 awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
58 include/generated/asm-offsets.h))
61 # Ensure that if the compiler supports branch protection we default it
62 # off, this will be overridden if we are using branch protection.
63 branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
# Pointer-authentication for kernel code: later assignments override the
# default above via the branch-prot-flags-$(CONFIG_...) := idiom (the
# variable only takes effect when the config symbol expands to y).
# NOTE(review): the endif/else lines for these ifeqs are not visible in
# this chunk; the fall-through (non-BTI) case at line 73 appears to be the
# else branch of the BTI ifeq.
65 ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
66 branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
67 # We enable additional protection for leaf functions as there is some
68 # narrow potential for ROP protection benefits and no substantial
69 # performance impact has been observed.
70 ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
71 branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI) := -mbranch-protection=pac-ret+leaf+bti
73 branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
75 # -march=armv8.3-a enables the non-nops instructions for PAC, to avoid the
76 # compiler to generate them and consequently to break the single image contract
77 # we pass it only to the assembler. This option is utilized only in case of non
78 # integrated assemblers.
79 ifeq ($(CONFIG_AS_HAS_PAC), y)
# Apply whichever branch-protection flags were selected above.
84 KBUILD_CFLAGS += $(branch-prot-flags-y)
# Pick the newest -march the assembler supports and pass it only to the
# assembler (-Wa,...), also exporting it to C code as ARM64_ASM_ARCH.
# NOTE(review): the bodies of these ifeq branches (the asm-arch :=
# assignments) and their endifs are not visible in this chunk.
86 ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
87 # make sure to pass the newest target architecture to -march.
91 ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
92 # make sure to pass the newest target architecture to -march.
97 KBUILD_CFLAGS += -Wa,-march=$(asm-arch) \
98 -DARM64_ASM_ARCH='"$(asm-arch)"'
# Shadow call stack: reserve x18 so the compiler never allocates it and it
# can hold the shadow stack pointer.
# NOTE(review): the endif for this ifeq is not visible in this chunk.
101 ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
102 KBUILD_CFLAGS += -ffixed-x18
# Select big- or little-endian compiler/linker flags and UTS machine name.
# NOTE(review): the else/endif lines of this conditional are not visible in
# this chunk; lines 113-117 appear to be the little-endian (else) branch.
105 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
106 KBUILD_CPPFLAGS += -mbig-endian
107 CHECKFLAGS += -D__AARCH64EB__
108 # Prefer the baremetal ELF build target, but not all toolchains include
109 # it so fall back to the standard linux version if needed.
110 KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
111 UTS_MACHINE := aarch64_be
113 KBUILD_CPPFLAGS += -mlittle-endian
114 CHECKFLAGS += -D__AARCH64EL__
115 # Same as above, prefer ELF but fall back to linux target if needed.
116 KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
117 UTS_MACHINE := aarch64
# LLD does not take the -m emulation fallback path above, so request
# norelro explicitly when linking with LLD.
# NOTE(review): the endif for this ifeq is not visible in this chunk.
120 ifeq ($(CONFIG_LD_IS_LLD), y)
121 KBUILD_LDFLAGS += -z norelro
# Tell sparse (CHECKFLAGS) we are targeting aarch64 regardless of endianness.
124 CHECKFLAGS += -D__aarch64__
# Dynamic ftrace with register saving uses patchable function entries
# (two NOPs at each function entry) instead of mcount calls.
# NOTE(review): the endif for this ifeq is not visible in this chunk.
126 ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
127 KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
128 CC_FLAGS_FTRACE := -fpatchable-function-entry=2
# Object linked first into vmlinux (kernel entry point).
132 head-y := arch/arm64/kernel/head.o
# KASAN shadow granularity: 16 bytes per shadow byte (shift 4) for the
# software tag-based mode, 8 bytes (shift 3) for generic KASAN.
# NOTE(review): the endif closing this conditional is not visible here.
134 ifeq ($(CONFIG_KASAN_SW_TAGS), y)
135 KASAN_SHADOW_SCALE_SHIFT := 4
136 else ifeq ($(CONFIG_KASAN_GENERIC), y)
137 KASAN_SHADOW_SCALE_SHIFT := 3
# Export the chosen shift to C, preprocessed assembly, and assembler input.
140 KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
141 KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
142 KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
# Arch library objects; prepended so arch lib wins over later entries.
144 libs-y := arch/arm64/lib/ $(libs-y)
# EFI stub library is linked in only when the stub is configured.
145 libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
147 # Default target when executing plain make
148 boot := arch/arm64/boot
149 KBUILD_IMAGE := $(boot)/Image.gz
# Recipe fragments that delegate image builds and installs to the boot
# directory's makefile, and vdso_install to the vdso makefiles.
# NOTE(review): the rule headers (target: prerequisite lines) for these
# recipes were dropped from this chunk — each $(Q)$(MAKE) line below belongs
# to a separate target defined on the elided lines; do not reflow them.
155 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
158 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
161 $(Q)$(MAKE) $(build)=$(boot) $@
163 PHONY += vdso_install
# vdso_install descends into the 64-bit vdso dir, and also into the 32-bit
# compat vdso dir when CONFIG_COMPAT_VDSO is set.
165 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
166 $(if $(CONFIG_COMPAT_VDSO), \
167 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
# Generates the arch-specific kernel API headers (syscall tables etc.).
170 $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
# Build-time warnings for toolchain gaps: erratum 843419 enabled but the
# linker lacks the fix; LSE atomics configured but binutils can't assemble
# them. Both echo to stderr instead of failing the build.
# NOTE(review): the endif lines for these conditionals are not visible in
# this chunk; these recipe lines belong to a target whose header is elided.
171 ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
172 ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
173 @echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2
176 ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y)
177 ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y)
178 @echo "warning: LSE atomics not supported by binutils" >&2
# Clean recipe: descend-and-clean the boot and vdso directories.
# NOTE(review): the target header for this recipe is elided in this chunk.
183 # We use MRPROPER_FILES and CLEAN_FILES now
185 $(Q)$(MAKE) $(clean)=$(boot)
186 $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso
187 $(Q)$(MAKE) $(clean)=arch/arm64/kernel/vdso32
# Only for in-tree builds (not external modules): hook vdso header
# generation between prepare0 and prepare.
# NOTE(review): the endif/endif closing this conditional and the inner
# ifdef are not visible in this chunk.
189 ifeq ($(KBUILD_EXTMOD),)
190 # We need to generate vdso-offsets.h before compiling certain files in kernel/.
191 # In order to do that, we should use the archprepare target, but we can't since
192 # asm-offsets.h is included in some files used to generate vdso-offsets.h, and
193 # asm-offsets.h is built in prepare0, for which archprepare is a dependency.
194 # Therefore we need to generate the header after prepare0 has been made, hence
196 prepare: vdso_prepare
197 vdso_prepare: prepare0
198 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso \
199 include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so
# The 32-bit compat vdso gets the same treatment when configured.
200 ifdef CONFIG_COMPAT_VDSO
201 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
202 include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so
# Help text printed by `make help` for the arch-specific targets.
# NOTE(review): the archhelp define/target header is elided in this chunk;
# these echo lines are its body. '$$(INSTALL_PATH)' is intentionally
# double-escaped so the literal text "$(INSTALL_PATH)" reaches the user.
207 echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
208 echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
209 echo ' install - Install uncompressed kernel'
210 echo ' zinstall - Install compressed kernel'
211 echo ' Install using (your) ~/bin/installkernel or'
212 echo ' (distribution) /sbin/installkernel or'
213 echo ' install to $$(INSTALL_PATH) and run lilo'