config HAVE_IMA_KEXEC
bool
-config SET_FS
- bool
-
config HOTPLUG_SMT
bool
config HAVE_NMI
bool
+config HAVE_FUNCTION_DESCRIPTORS
+ bool
+
config TRACE_IRQFLAGS_SUPPORT
bool
config ARCH_SUPPORTS_SHADOW_CALL_STACK
bool
help
- An architecture should select this if it supports Clang's Shadow
- Call Stack and implements runtime support for shadow stack
+ An architecture should select this if it supports the compiler's
+ Shadow Call Stack and implements runtime support for shadow stack
switching.
config SHADOW_CALL_STACK
- bool "Clang Shadow Call Stack"
- depends on CC_IS_CLANG && ARCH_SUPPORTS_SHADOW_CALL_STACK
+ bool "Shadow Call Stack"
+ depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
help
- This option enables Clang's Shadow Call Stack, which uses a
- shadow stack to protect function return addresses from being
- overwritten by an attacker. More information can be found in
- Clang's documentation:
+ This option enables the compiler's Shadow Call Stack, which
+ uses a shadow stack to protect function return addresses from
+ being overwritten by an attacker. More information can be found
+ in the compiler's documentation:
- https://clang.llvm.org/docs/ShadowCallStack.html
+ - Clang: https://clang.llvm.org/docs/ShadowCallStack.html
+ - GCC: https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#Instrumentation-Options
Note that security guarantees in the kernel differ from the
ones documented for user space. The kernel must store addresses
Architecture provides a function to run __do_softirq() on a
separate stack.
+config ALTERNATE_USER_ADDRESS_SPACE
+ bool
+ help
+ Architectures set this when the CPU uses separate address
+ spaces for kernel and user space pointers. In this case, the
+ access_ok() check on a __user pointer is skipped.
+
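As an aside on what this means in practice, here is a hedged sketch (not the actual asm-generic/access_ok.h code; my_access_ok is a made-up name) of how the range check degenerates when the architecture keeps user pointers in their own address space:

#include <linux/uaccess.h>

/* Illustrative only: with ALTERNATE_USER_ADDRESS_SPACE there is no numeric
 * kernel/user boundary to compare against, so the check collapses to "true". */
static inline int my_access_ok(const void __user *ptr, unsigned long size)
{
	if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE))
		return 1;
	return (unsigned long)ptr < TASK_SIZE_MAX &&
	       size <= TASK_SIZE_MAX - (unsigned long)ptr;
}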
config PGTABLE_LEVELS
int
default 2
to the compiler, so it will attempt to add canary checks regardless
of the static branch state.
-config RANDOMIZE_KSTACK_OFFSET_DEFAULT
- bool "Randomize kernel stack offset on syscall entry"
+config RANDOMIZE_KSTACK_OFFSET
+ bool "Support for randomizing kernel stack offset on syscall entry" if EXPERT
+ default y
depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+ depends on INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION >= 140000
help
The kernel stack offset can be randomized (after pt_regs) by
roughly 5 bits of entropy, frustrating memory corruption
attacks that depend on stack address determinism or
- cross-syscall address exposures. This feature is controlled
- by kernel boot param "randomize_kstack_offset=on/off", and this
- config chooses the default boot state.
+ cross-syscall address exposures.
+
+ The feature is controlled via the "randomize_kstack_offset=on/off"
+ kernel boot param, and if turned off has zero overhead due to its use
+ of static branches (see JUMP_LABEL).
+
+ If unsure, say Y.
+
+config RANDOMIZE_KSTACK_OFFSET_DEFAULT
+ bool "Default state of kernel stack offset randomization"
+ depends on RANDOMIZE_KSTACK_OFFSET
+ help
+ Kernel stack offset randomization is controlled by kernel boot param
+ "randomize_kstack_offset=on/off", and this config chooses the default
+ boot state.
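For a sense of how the zero-overhead claim above works, a minimal sketch of the static-branch gating (key and helper names are illustrative, not the kernel's randomize_kstack.h API):

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(kstack_offset_on);	/* flipped by the boot parameter */

static __always_inline void add_kstack_offset_sketch(u32 entropy)
{
	if (static_branch_unlikely(&kstack_offset_on)) {
		u32 offset = entropy & 0x3ff;	/* keep only a few low bits */
		/* Open a small, unpredictable gap on the stack before the
		 * syscall body runs; the asm keeps the alloca alive. */
		u8 *gap = __builtin_alloca(offset & ~0xf);
		asm volatile("" : : "r"(gap));
	}
}

When the key is false, the whole body sits behind a patched-out jump, which is where the zero overhead in the disabled case comes from.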
config ARCH_OPTIONAL_KERNEL_RWX
def_bool n
config HAVE_PREEMPT_DYNAMIC
bool
+
+config HAVE_PREEMPT_DYNAMIC_CALL
+ bool
depends on HAVE_STATIC_CALL
- depends on GENERIC_ENTRY
+ select HAVE_PREEMPT_DYNAMIC
help
- Select this if the architecture support boot time preempt setting
- on top of static calls. It is strongly advised to support inline
- static call to avoid any overhead.
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static calls.
+
+ Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
+ preemption function will be patched directly.
+
+ Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
+ call to a preemption function will go through a trampoline, and the
+ trampoline will be patched.
+
+ It is strongly advised to support inline static call to avoid any
+ overhead.
+
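For illustration, a rough sketch of the static-call flavour described above (function and call names are placeholders, not the scheduler's actual symbols):

#include <linux/static_call.h>

static void preempt_full(void) { /* full preemption work */ }
static void preempt_none(void) { /* nothing to do when preemption is off */ }

DEFINE_STATIC_CALL(sched_preempt_sketch, preempt_full);

void maybe_preempt(void)
{
	/* With HAVE_STATIC_CALL_INLINE this call site itself is patched;
	 * otherwise it branches through a trampoline that gets patched. */
	static_call(sched_preempt_sketch)();
}

static void pick_preempt_model(bool full)	/* e.g. from a "preempt=" handler */
{
	static_call_update(sched_preempt_sketch, full ? preempt_full : preempt_none);
}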
+config HAVE_PREEMPT_DYNAMIC_KEY
+ bool
+ depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
+ select HAVE_PREEMPT_DYNAMIC
+ help
+ An architecture should select this if it can handle the preemption
+ model being selected at boot time using static keys.
+
+ Each preemption function will be given an early return based on a
+ static key. This should have slightly lower overhead than non-inline
+ static calls, as this effectively inlines each trampoline into the
+ start of its callee. This may avoid redundant work, and may
+ integrate better with CFI schemes.
+
+ This will have greater overhead than using inline static calls as
+ the call to the preemption function cannot be entirely elided.
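And a matching sketch of the static-key flavour (again with made-up names), showing the early return the help text refers to:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_TRUE(preempt_sched_enabled);

static void do_preempt_work(void) { /* hypothetical stand-in for the real body */ }

void preempt_schedule_sketch(void)
{
	/* The patched branch effectively inlines the "disabled" trampoline into
	 * the start of the callee: when the model is switched at boot, the
	 * disabled case falls straight out of the function. */
	if (!static_branch_likely(&preempt_sched_enabled))
		return;

	do_preempt_work();
}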
config ARCH_WANT_LD_ORPHAN_WARN
bool
config IA64
bool
+ select ARCH_BINFMT_ELF_EXTRA_PHDRS
select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
select HAVE_SETUP_PER_CPU_AREA
select TTY
select HAVE_ARCH_TRACEHOOK
+ select HAVE_FUNCTION_DESCRIPTORS
select HAVE_VIRT_CPU_ACCOUNTING
select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE
select VIRT_TO_BUS
select NEED_SG_DMA_LENGTH
select NUMA if !FLATMEM
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
- select SET_FS
select ZONE_DMA32
default y
help
# SPDX-License-Identifier: GPL-2.0
config PARISC
def_bool y
+ select ALTERNATE_USER_ADDRESS_SPACE
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_FUNCTION_TRACER
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_PTE_SPECIAL
select ARCH_NO_SG_CHAIN
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select HAVE_RELIABLE_STACKTRACE
select DMA_OPS
select RTC_CLASS
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
select TRACE_IRQFLAGS_SUPPORT
+ select HAVE_FUNCTION_DESCRIPTORS if 64BIT
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
endchoice
-config PARISC_SELF_EXTRACT
- bool "Build kernel as self-extracting executable"
- default y
- help
- Say Y if you want to build the parisc kernel as a kind of
- self-extracting executable.
-
- If you say N here, the kernel will be compressed with gzip
- which can be loaded by the palo bootloader directly too.
-
- If you don't know what to do here, say Y.
-
config SMP
bool "Symmetric multi-processing support"
help
bool
default y if PPC64
+config LIVEPATCH_64
+ def_bool PPC64
+ depends on LIVEPATCH
+
config MMU
bool
default y
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_COPY_MC if PPC64
+ select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION
select ARCH_HAS_STRICT_KERNEL_RWX if FSL_BOOKE && !HIBERNATION && !RANDOMIZE_BASE
- select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32
+ select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN_SANITIZE_ALL
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS if MPROFILE_KERNEL || PPC32
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL || PPC32
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_DESCRIPTORS if PPC64 && !CPU_LITTLE_ENDIAN
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
- select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS && PPC64
+ select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_OPTPROBES
unsigned long clr, unsigned long set, int huge)
{
pte_basic_t old;
- unsigned long tmp;
- __asm__ __volatile__(
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ unsigned long tmp;
+
+ asm volatile(
#ifndef CONFIG_PTE_64BIT
- "1: lwarx %0, 0, %3\n"
- " andc %1, %0, %4\n"
+ "1: lwarx %0, 0, %3\n"
+ " andc %1, %0, %4\n"
#else
- "1: lwarx %L0, 0, %3\n"
- " lwz %0, -4(%3)\n"
- " andc %1, %L0, %4\n"
+ "1: lwarx %L0, 0, %3\n"
+ " lwz %0, -4(%3)\n"
+ " andc %1, %L0, %4\n"
#endif
- " or %1, %1, %5\n"
- " stwcx. %1, 0, %3\n"
- " bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
+ " or %1, %1, %5\n"
+ " stwcx. %1, 0, %3\n"
+ " bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
- : "r" (p),
+ : "r" (p),
#else
- : "b" ((unsigned long)(p) + 4),
+ : "b" ((unsigned long)(p) + 4),
#endif
- "r" (clr), "r" (set), "m" (*p)
- : "cc" );
+ "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+ } else {
+ old = pte_val(*p);
+
+ *p = __pte((old & ~(pte_basic_t)clr) | set);
+ }
return old;
}
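For context, a hedged sketch of a typical caller of pte_update() (the wrapper name is made up and the real 32-bit Book3S helpers differ in detail):

/* Clear _PAGE_RW on a PTE; pte_update() internally picks the lwarx/stwcx.
 * loop when the hash MMU may touch the PTE concurrently, or the plain
 * read-modify-write path otherwise. */
static inline void my_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
					 pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}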
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
-#define pmd_page(pmd) \
- pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
/*
* Encode and decode a swap entry.
return pte_val(pte) & _PAGE_ACCESSED;
}
- #define __HAVE_ARCH_PTE_SAME
- #define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)
-
/*
* Note that on Book E processors, the pmd contains the kernel virtual
* (lowmem) address of the pte page. The physical address is less useful
* of the pte page. -- paulus
*/
#ifndef CONFIG_BOOKE
-#define pmd_page(pmd) \
- pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
-#define pmd_page(pmd) \
- pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
#define pmd_present(pmd) (!pmd_none(pmd))
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);
+#define pmd_pfn(pmd) (page_to_pfn(pmd_page(pmd)))
static inline void pud_set(pud_t *pudp, unsigned long val)
{
flush_tlb_page(vma, address);
}
- #define __HAVE_ARCH_PTE_SAME
- #define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)
-
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX TASK_SIZE_USER64
-#else
-#define TASK_SIZE_MAX TASK_SIZE
#endif
-static inline bool __access_ok(unsigned long addr, unsigned long size)
-{
- return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr;
-}
-
-#define access_ok(addr, size) \
- (__chk_user_ptr(addr), \
- __access_ok((unsigned long)(addr), (size)))
+#include <asm-generic/access_ok.h>
/*
* These are the main single-value transfer routines. They automatically
*/
#define __get_user_atomic_128_aligned(kaddr, uaddr, err) \
__asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine altivec\n" \
"1: lvx 0,0,%1 # get user\n" \
" stvx 0,0,%2 # put kernel\n" \
+ ".machine pop\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %0,%3\n" \
unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)
-#define HAVE_GET_KERNEL_NOFAULT
-
#define __get_kernel_nofault(dst, src, type, err_label) \
__get_user_size_goto(*((type *)(dst)), \
(__force type __user *)(src), sizeof(type), err_label)
return 1;
}
+ /*
+ * If CMA activation fails, keep the pages reserved, instead of
+ * exposing them to the buddy allocator. Same as the 'fadump=nocma' case.
+ */
+ cma_reserve_pages_on_error(fadump_cma);
+
/*
* So we now have successfully initialized cma area for fadump.
*/
if (!fw_dump.nocma) {
fw_dump.boot_memory_size =
ALIGN(fw_dump.boot_memory_size,
- FADUMP_CMA_ALIGNMENT);
+ CMA_MIN_ALIGNMENT_BYTES);
}
#endif
if (fw_dump.ops->fadump_process(&fw_dump) < 0)
fadump_invalidate_release_mem();
}
- /* Initialize the kernel dump memory structure for FAD registration. */
- else if (fw_dump.reserve_dump_area_size)
+ /* Initialize the kernel dump memory structure and register with f/w */
+ else if (fw_dump.reserve_dump_area_size) {
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
+ register_fadump();
+ }
/*
* In case of panic, fadump is triggered via ppc_panic_event()
return 1;
}
- subsys_initcall(setup_fadump);
+ /*
+ * Use subsys_initcall_sync() here because there is a dependency on
+ * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo
+ * initialization is done before registering with f/w.
+ */
+ subsys_initcall_sync(setup_fadump);
#else /* !CONFIG_PRESERVE_FA_DUMP */
/* Scan the Firmware Assisted dump configuration details. */
mtspr SPRN_DBAT##n##L,RB
__HEAD
- .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
- .stabs "head_book3s_32.S",N_SO,0,0,0f
- 0:
- _ENTRY(_stext);
+ _GLOBAL(_stext);
/*
* _start is defined this way because the XCOFF loader in the OpenFirmware
* on the powermac expects the entry point to be a procedure descriptor.
*/
- _ENTRY(_start);
+ _GLOBAL(_start);
/*
* These are here for legacy reasons, the kernel used to
* need to look like a coff function entry for the pmac
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
lwz r0,0(r2) /* get linux-style pte */
andc. r1,r1,r0 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
- /*
- * NOTE! We are assuming this is not an SMP system, otherwise
- * we would need to update the pte atomically with lwarx/stwcx.
- */
/* Convert linux-style PTE to low word of PPC-style PTE */
rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
+ rlwimi r1,r0,32-3,24,24 /* _PAGE_RW -> _PAGE_DIRTY */
rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
+ xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
ori r1,r1,0xe04 /* clear out reserved bits */
andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
lwz r0,0(r2) /* get linux-style pte */
andc. r1,r1,r0 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
- /*
- * NOTE! We are assuming this is not an SMP system, otherwise
- * we would need to update the pte atomically with lwarx/stwcx.
- */
/* Convert linux-style PTE to low word of PPC-style PTE */
rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
li r1,0xe06 /* clear out reserved bits & PP msb */
* r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
* on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
*/
- _ENTRY(copy_and_flush)
+ _GLOBAL(copy_and_flush)
addi r5,r5,-4
addi r6,r6,-4
4: li r0,L1_CACHE_BYTES/4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
- _ENTRY(update_bats)
+ _GLOBAL(update_bats)
lis r4, 1f@h
ori r4, r4, 1f@l
tophys(r4, r4)
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
unsigned long val)
{
- #ifdef __powerpc64__
if ((msr & MSR_64BIT) == 0)
val &= 0xffffffffUL;
- #endif
return val;
}
{
if (!user_mode(regs))
return 1;
- if (__access_ok(ea, nb))
+ if (access_ok((void __user *)ea, nb))
return 1;
- if (__access_ok(ea, 1))
+ if (access_ok((void __user *)ea, 1))
/* Access overlaps the end of the user region */
regs->dar = TASK_SIZE_MAX - 1;
else
int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
int err;
- unsigned long size;
+ unsigned long size = l1_dcache_bytes();
- #ifdef __powerpc64__
- size = ppc64_caches.l1d.block_size;
- if (!(regs->msr & MSR_64BIT))
- ea &= 0xffffffffUL;
- #else
- size = L1_CACHE_BYTES;
- #endif
+ ea = truncate_if_32bit(regs->msr, ea);
ea &= ~(size - 1);
if (!address_ok(regs, ea, size))
return -EFAULT;
#define __put_user_asmx(x, addr, err, op, cr) \
__asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine power8\n" \
"1: " op " %2,0,%3\n" \
+ ".machine pop\n" \
" mfcr %1\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
#define __get_user_asmx(x, addr, err, op) \
__asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine power8\n" \
"1: "op" %1,0,%2\n" \
+ ".machine pop\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %0,%3\n" \
op->type |= SETCC;
op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
- #ifdef __powerpc64__
if (!(regs->msr & MSR_64BIT))
val = (int) val;
- #endif
if (val < 0)
op->ccval |= 0x80000000;
else if (val > 0)
op->type = COMPUTE + SETREG + SETXER;
op->reg = rd;
op->val = val;
- #ifdef __powerpc64__
- if (!(regs->msr & MSR_64BIT)) {
- val = (unsigned int) val;
- val1 = (unsigned int) val1;
- }
- #endif
+ val = truncate_if_32bit(regs->msr, val);
+ val1 = truncate_if_32bit(regs->msr, val1);
op->xerval = regs->xer;
if (val < val1 || (carry_in && val == val1))
op->xerval |= XER_CA;
case BARRIER_EIEIO:
eieio();
break;
+#ifdef CONFIG_PPC64
case BARRIER_LWSYNC:
asm volatile("lwsync" : : : "memory");
break;
case BARRIER_PTESYNC:
asm volatile("ptesync" : : : "memory");
break;
+#endif
}
break;
__put_user_asmx(op->val, ea, err, "stbcx.", cr);
break;
case 2:
- __put_user_asmx(op->val, ea, err, "stbcx.", cr);
+ __put_user_asmx(op->val, ea, err, "sthcx.", cr);
break;
#endif
case 4:
*/
tmp_idx = ctx->idx;
ctx->idx = addrs[i] / 4;
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ if (ret)
+ return ret;
/*
* Restore ctx->idx here. This is safe as the length
tmp_idx = ctx->idx;
ctx->idx = addrs[i] / 4;
#ifdef CONFIG_PPC32
- PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
- PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
+ PPC_LI32(bpf_to_ppc(insn[i].dst_reg) - 1, (u32)insn[i + 1].imm);
+ PPC_LI32(bpf_to_ppc(insn[i].dst_reg), (u32)insn[i].imm);
for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
EMIT(PPC_RAW_NOP());
#else
func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
- PPC_LI64(b2p[insn[i].dst_reg], func_addr);
+ PPC_LI64(bpf_to_ppc(insn[i].dst_reg), func_addr);
/* overwrite rest with nops */
for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
EMIT(PPC_RAW_NOP());
return 0;
}
+ int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
+ {
+ if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
+ PPC_JMP(exit_addr);
+ } else if (ctx->alt_exit_addr) {
+ if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
+ return -1;
+ PPC_JMP(ctx->alt_exit_addr);
+ } else {
+ ctx->alt_exit_addr = ctx->idx * 4;
+ bpf_jit_build_epilogue(image, ctx);
+ }
+
+ return 0;
+ }
+
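For context, a hedged usage sketch (variable names are illustrative, not the exact bpf_jit_comp code): inside the instruction-translation loop, a BPF_EXIT that is not the final instruction is lowered through this helper, so the jump either reaches the shared epilogue directly or goes via the alternate exit stub:

/* tmp_reg and exit_addr are whatever the surrounding translation uses. */
ret = bpf_jit_emit_exit_insn(image, ctx, tmp_reg, exit_addr);
if (ret)
	return ret;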
struct powerpc64_jit_data {
struct bpf_binary_header *header;
u32 *addrs;
}
memset(&cgctx, 0, sizeof(struct codegen_context));
- memcpy(cgctx.b2p, b2p, sizeof(cgctx.b2p));
+ bpf_jit_init_reg_mapping(&cgctx);
/* Make sure that the stack is quadword aligned. */
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
* If we have seen a tail call, we need a second pass.
* This is because bpf_jit_emit_common_epilogue() is called
* from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
+ * We also need a second pass if the program ended up too large, to
+ * ensure that BPF_EXIT branches are in range.
*/
- if (cgctx.seen & SEEN_TAILCALL) {
+ if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
cgctx.idx = 0;
if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
fp = org_fp;
* calculate total size from idx.
*/
bpf_jit_build_prologue(0, &cgctx);
+ addrs[fp->len] = cgctx.idx * 4;
bpf_jit_build_epilogue(0, &cgctx);
fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
for (pass = 1; pass < 3; pass++) {
/* Now build the prologue, body code & epilogue for real. */
cgctx.idx = 0;
+ cgctx.alt_exit_addr = 0;
bpf_jit_build_prologue(code_base, &cgctx);
if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
bpf_jit_binary_free(bpf_hdr);
fp->jited = 1;
fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
- bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
+ bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
if (!fp->is_func || extra_pass) {
bpf_jit_binary_lock_ro(bpf_hdr);
bpf_prog_fill_jited_linfo(fp, addrs);