Merge branches 'misc', 'sa1100-for-next' and 'spectre' into for-linus
author Russell King <rmk+kernel@armlinux.org.uk>
Wed, 2 Jan 2019 10:37:05 +0000 (10:37 +0000)
committer Russell King <rmk+kernel@armlinux.org.uk>
Wed, 2 Jan 2019 10:37:05 +0000 (10:37 +0000)
31 files changed:
arch/arm/Kconfig
arch/arm/boot/compressed/atags_to_fdt.c
arch/arm/include/asm/assembler.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/proc-fns.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/bugs.c
arch/arm/kernel/head-common.S
arch/arm/kernel/head.S
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/lib/copy_from_user.S
arch/arm/lib/copy_to_user.S
arch/arm/lib/getuser.S
arch/arm/lib/putuser.S
arch/arm/mach-omap2/Kconfig
arch/arm/mach-pxa/Kconfig
arch/arm/mm/copypage-fa.c
arch/arm/mm/copypage-feroceon.c
arch/arm/mm/copypage-v4mc.c
arch/arm/mm/copypage-v4wb.c
arch/arm/mm/copypage-v4wt.c
arch/arm/mm/copypage-xsc3.c
arch/arm/mm/copypage-xscale.c
arch/arm/mm/fault.c
arch/arm/mm/proc-macros.S
arch/arm/mm/proc-v7-bugs.c
arch/arm/mm/proc-v7.S
arch/arm/mm/pv-fixup-asm.S
arch/arm/plat-omap/Kconfig
arch/arm/vfp/vfpmodule.c

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 91be74d..a37ee58 100644
@@ -27,14 +27,14 @@ config ARM
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT if MMU
        select CLONE_BACKWARDS
-       select CPU_PM if (SUSPEND || CPU_IDLE)
+       select CPU_PM if SUSPEND || CPU_IDLE
        select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
        select DMA_DIRECT_OPS if !MMU
        select EDAC_SUPPORT
        select EDAC_ATOMIC_SCRUB
        select GENERIC_ALLOCATOR
        select GENERIC_ARCH_TOPOLOGY if ARM_CPU_TOPOLOGY
-       select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
+       select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_EARLY_IOREMAP
@@ -49,12 +49,12 @@ config ARM
        select GENERIC_STRNLEN_USER
        select HANDLE_DOMAIN_IRQ
        select HARDIRQS_SW_RESEND
-       select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+       select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
        select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
-       select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+       select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
        select HAVE_ARCH_THREAD_STRUCT_WHITELIST
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARM_SMCCC if CPU_V7
@@ -63,16 +63,16 @@ config ARM
        select HAVE_C_RECORDMCOUNT
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS if MMU
-       select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
+       select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
        select HAVE_EXIT_THREAD
-       select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
-       select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
-       select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
+       select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+       select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
+       select HAVE_FUNCTION_TRACER if !XIP_KERNEL
        select HAVE_GCC_PLUGINS
        select HAVE_GENERIC_DMA_COHERENT
-       select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
+       select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
        select HAVE_IDE if PCI || ISA || PCMCIA
        select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KERNEL_GZIP
@@ -81,15 +81,15 @@ config ARM
        select HAVE_KERNEL_LZO
        select HAVE_KERNEL_XZ
        select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
-       select HAVE_KRETPROBES if (HAVE_KPROBES)
+       select HAVE_KRETPROBES if HAVE_KPROBES
        select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_NMI
-       select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
+       select HAVE_OPROFILE if HAVE_PERF_EVENTS
        select HAVE_OPTPROBES if !THUMB2_KERNEL
        select HAVE_PERF_EVENTS
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
-       select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+       select HAVE_RCU_TABLE_FREE if SMP && ARM_LPAE
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
@@ -1783,7 +1783,6 @@ config PARAVIRT
 config PARAVIRT_TIME_ACCOUNTING
        bool "Paravirtual steal time accounting"
        select PARAVIRT
-       default n
        help
          Select this option to enable fine granularity task steal time
          accounting. Time spent executing other tasks in parallel with
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 41fa731..330cd3c 100644
@@ -98,6 +98,24 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
        setprop_string(fdt, "/chosen", "bootargs", cmdline);
 }
 
+static void hex_str(char *out, uint32_t value)
+{
+       uint32_t digit;
+       int idx;
+
+       for (idx = 7; idx >= 0; idx--) {
+               digit = value >> 28;
+               value <<= 4;
+               digit &= 0xf;
+               if (digit < 10)
+                       digit += '0';
+               else
+                       digit += 'A'-10;
+               *out++ = digit;
+       }
+       *out = '\0';
+}
+
 /*
  * Convert and fold provided ATAGs into the provided FDT.
  *
@@ -180,6 +198,11 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
                                        initrd_start);
                        setprop_cell(fdt, "/chosen", "linux,initrd-end",
                                        initrd_start + initrd_size);
+               } else if (atag->hdr.tag == ATAG_SERIAL) {
+                       char serno[16+2];
+                       hex_str(serno, atag->u.serialnr.high);
+                       hex_str(serno+8, atag->u.serialnr.low);
+                       setprop_string(fdt, "/", "serial-number", serno);
                }
        }
 
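For reference, a standalone C sketch (not part of the patch; the serial value below is invented) of what the hex_str() helper and the ATAG_SERIAL branch above end up placing in the /serial-number property:

#include <stdint.h>
#include <stdio.h>

/* same algorithm as the patch: emit 8 uppercase hex digits, MSB first */
static void hex_str(char *out, uint32_t value)
{
	uint32_t digit;
	int idx;

	for (idx = 7; idx >= 0; idx--) {
		digit = (value >> 28) & 0xf;
		value <<= 4;
		*out++ = digit < 10 ? '0' + digit : 'A' + (digit - 10);
	}
	*out = '\0';
}

int main(void)
{
	char serno[16 + 2];

	/* invented serial number: high = 0x0000BEEF, low = 0x12345678 */
	hex_str(serno, 0x0000BEEF);
	hex_str(serno + 8, 0x12345678);
	printf("serial-number = \"%s\"\n", serno);	/* 0000BEEF12345678 */
	return 0;
}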
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 88286dd..28a48e0 100644
        .endm
 #endif
 
-#define USER(x...)                             \
+#define USERL(l, x...)                         \
 9999:  x;                                      \
        .pushsection __ex_table,"a";            \
        .align  3;                              \
-       .long   9999b,9001f;                    \
+       .long   9999b,l;                        \
        .popsection
 
+#define USER(x...)     USERL(9001f, x)
+
 #ifdef CONFIG_SMP
 #define ALT_SMP(instr...)                                      \
 9998:  instr
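The new USERL() macro generalises USER(): the faulting instruction at local label 9999 gets an exception-table entry pointing at a caller-supplied fixup label instead of the fixed 9001f. The non-domain uaccess copy macros added later in this merge (see copy_from_user.S and copy_to_user.S below) rely on this, e.g. USERL(\abort, ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4}), so that a fault anywhere in the block transfer resumes at the caller's abort handler.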
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 0d28924..775cac3 100644
 #include <linux/kernel.h>
 
 extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
 
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)                                                        \
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index e25f439..e1b6f28 100644
@@ -23,7 +23,7 @@ struct mm_struct;
 /*
  * Don't change this structure - ASM code relies on it.
  */
-extern struct processor {
+struct processor {
        /* MISC
         * get data abort address/flags
         */
@@ -79,9 +79,13 @@ extern struct processor {
        unsigned int suspend_size;
        void (*do_suspend)(void *);
        void (*do_resume)(void *);
-} processor;
+};
 
 #ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 extern void cpu_do_suspend(void *);
 extern void cpu_do_resume(void *);
 #else
-#define cpu_proc_init                  processor._proc_init
-#define cpu_proc_fin                   processor._proc_fin
-#define cpu_reset                      processor.reset
-#define cpu_do_idle                    processor._do_idle
-#define cpu_dcache_clean_area          processor.dcache_clean_area
-#define cpu_set_pte_ext                        processor.set_pte_ext
-#define cpu_do_switch_mm               processor.switch_mm
 
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend                 processor.do_suspend
-#define cpu_do_resume                  processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised.  We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)                 cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)                  cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+       unsigned int cpu = smp_processor_id();
+       *cpu_vtable[cpu] = *p;
+       WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+                    cpu_vtable[0]->dcache_clean_area);
+       WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+                    cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f)                 processor.f
+#define PROC_TABLE(f)                  processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+       processor = *p;
+}
+#endif
+
+#define cpu_proc_init                  PROC_VTABLE(_proc_init)
+#define cpu_check_bugs                 PROC_VTABLE(check_bugs)
+#define cpu_proc_fin                   PROC_VTABLE(_proc_fin)
+#define cpu_reset                      PROC_VTABLE(reset)
+#define cpu_do_idle                    PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area          PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext                        PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm               PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend                 PROC_VTABLE(do_suspend)
+#define cpu_do_resume                  PROC_VTABLE(do_resume)
 #endif
 
 extern void cpu_resume(void);
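A minimal standalone C sketch (not part of the patch; the CPU count and method names are invented for illustration) of the dispatch scheme above: PROC_VTABLE() methods go through the current CPU's vtable, so each CPU type can carry its own hardened switch_mm, while PROC_TABLE() methods may be called preemptibly and therefore must be identical on all CPUs, which init_proc_vtable() asserts:

#include <assert.h>
#include <stdio.h>

struct processor {
	void (*switch_mm)(void);
	void (*dcache_clean_area)(void);
};

/* stand-ins for the kernel's cpu_vtable[] and smp_processor_id() */
static struct processor proc_a, proc_b;
static struct processor *cpu_vtable[2] = { &proc_a, &proc_b };
static int current_cpu;

#define PROC_VTABLE(f)	cpu_vtable[current_cpu]->f	/* per-CPU method */
#define PROC_TABLE(f)	cpu_vtable[0]->f		/* must match on all CPUs */

static void a_switch(void) { puts("A switch_mm"); }
static void b_switch(void) { puts("B switch_mm"); }
static void clean(void)    { puts("dcache_clean_area"); }

int main(void)
{
	proc_a = (struct processor){ a_switch, clean };
	proc_b = (struct processor){ b_switch, clean };

	/* init_proc_vtable()'s WARN_ON_ONCE checks, in miniature */
	assert(cpu_vtable[1]->dcache_clean_area ==
	       cpu_vtable[0]->dcache_clean_area);

	current_cpu = 1;
	PROC_VTABLE(switch_mm)();		/* prints "B switch_mm" */
	PROC_TABLE(dcache_clean_area)();	/* always uses slot 0 */
	return 0;
}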
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index c136eef..6390a40 100644
@@ -349,6 +349,13 @@ do {                                                                       \
 #define __get_user_asm_byte(x, addr, err)                      \
        __get_user_asm(x, addr, err, ldrb)
 
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err)                      \
+       __get_user_asm(x, addr, err, ldrh)
+
+#else
+
 #ifndef __ARMEB__
 #define __get_user_asm_half(x, __gu_addr, err)                 \
 ({                                                             \
@@ -367,6 +374,8 @@ do {                                                                        \
 })
 #endif
 
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
 #define __get_user_asm_word(x, addr, err)                      \
        __get_user_asm(x, addr, err, ldr)
 #endif
@@ -442,6 +451,13 @@ do {                                                                       \
 #define __put_user_asm_byte(x, __pu_addr, err)                 \
        __put_user_asm(x, __pu_addr, err, strb)
 
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err)                 \
+       __put_user_asm(x, __pu_addr, err, strh)
+
+#else
+
 #ifndef __ARMEB__
 #define __put_user_asm_half(x, __pu_addr, err)                 \
 ({                                                             \
@@ -458,6 +474,8 @@ do {                                                                        \
 })
 #endif
 
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
 #define __put_user_asm_word(x, __pu_addr, err)                 \
        __put_user_asm(x, __pu_addr, err, str)
 
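The split above works because ARMv6 introduced architectural support for unaligned halfword access, so a single ldrh/strh suffices; earlier architectures keep the two byte accesses. A standalone C illustration (not kernel code) of what the little-endian pre-v6 __get_user_asm_half() fallback assembles:

#include <stdint.h>
#include <stdio.h>

/* pre-ARMv6, little-endian: two byte loads composed as in the
 * __get_user_asm_half() fallback above */
static uint16_t get_half_le(const uint8_t *p)
{
	uint8_t lo = p[0];	/* __gu_addr     */
	uint8_t hi = p[1];	/* __gu_addr + 1 */

	return (uint16_t)(lo | (hi << 8));
}

int main(void)
{
	uint8_t buf[] = { 0x34, 0x12 };

	printf("0x%04x\n", get_half_le(buf));	/* prints 0x1234 */
	return 0;
}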
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
index 7be5113..d41d359 100644
@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-       if (processor.check_bugs)
-               processor.check_bugs();
+       if (cpu_check_bugs)
+               cpu_check_bugs();
 #endif
 }
 
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 6e0375e..997b023 100644
@@ -145,6 +145,9 @@ __mmap_switched_data:
 #endif
        .size   __mmap_switched_data, . - __mmap_switched_data
 
+       __FINIT
+       .text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
        ldmfd   sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)
 
-       __FINIT
-       .text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 6b1148c..4485d04 100644
@@ -398,7 +398,7 @@ ENTRY(secondary_startup)
        ldmia   r4, {r5, r7, r12}               @ address to jump to after
        sub     lr, r4, r5                      @ mmu has been enabled
        add     r3, r7, lr
-       ldrd    r4, [r3, #0]                    @ get secondary_data.pgdir
+       ldrd    r4, r5, [r3, #0]                @ get secondary_data.pgdir
 ARM_BE8(eor    r4, r4, r5)                     @ Swap r5 and r4 in BE:
 ARM_BE8(eor    r5, r4, r5)                     @ it can be done in 3 steps
 ARM_BE8(eor    r4, r4, r5)                     @ without using a temp reg.
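(The ldrd/strd edits here, and the matching ones in copypage-xsc3.c, copypage-xscale.c and pv-fixup-asm.S further down, only spell out the second register of the 64-bit pair. GNU as infers it from the first register, but stricter assemblers, notably Clang's integrated assembler, reject the shorthand, so `ldrd r4, [r3, #0]` becomes `ldrd r4, r5, [r3, #0]` with identical behaviour.)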
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ac7e088..375b13f 100644
@@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2);
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+       [0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void)
 }
 #endif
 
-static void __init setup_processor(void)
+/*
+ * locate processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
 {
-       struct proc_info_list *list;
+       struct proc_info_list *list = lookup_processor_type(midr);
 
-       /*
-        * locate processor in the list of supported processor
-        * types.  The linker builds this table for us from the
-        * entries in arch/arm/mm/proc-*.S
-        */
-       list = lookup_processor_type(read_cpuid_id());
        if (!list) {
-               pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-                      read_cpuid_id());
-               while (1);
+               pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+                      smp_processor_id(), midr);
+               while (1)
+               /* can't use cpu_relax() here as it may require MMU setup */;
        }
 
+       return list;
+}
+
+static void __init setup_processor(void)
+{
+       unsigned int midr = read_cpuid_id();
+       struct proc_info_list *list = lookup_processor(midr);
+
        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();
 
-#ifdef MULTI_CPU
-       processor = *list->proc;
-#endif
+       init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
 #endif
@@ -699,7 +709,7 @@ static void __init setup_processor(void)
 #endif
 
        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+               list->cpu_name, midr, midr & 15,
                proc_arch[cpu_architecture()], get_cr());
 
        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 0978282..3bf8223 100644
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+       if (!cpu_vtable[cpu])
+               cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+       return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+       init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+       return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
        int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        if (!smp_ops.smp_boot_secondary)
                return -ENOSYS;
 
+       ret = secondary_biglittle_prepare(cpu);
+       if (ret)
+               return ret;
+
        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;
 
+       secondary_biglittle_init();
+
        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
@@ -693,6 +724,21 @@ void smp_send_stop(void)
                pr_warn("SMP: failed to stop secondary CPUs\n");
 }
 
+/*
+ * If panic() is called on two CPUs at the same time, e.g. on CPU1 and
+ * CPU2, and CPU1 enters panic_smp_self_stop() before CPU2 issues
+ * crash_smp_send_stop(), CPU1 cannot receive the stop IPI from CPU2 and
+ * stays marked online, so kdump fails.  Hence provide our own
+ * panic_smp_self_stop() which also calls
+ * set_cpu_online(smp_processor_id(), false).
+ */
+void panic_smp_self_stop(void)
+{
+       pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
+                smp_processor_id());
+       set_cpu_online(smp_processor_id(), false);
+       while (1)
+               cpu_relax();
+}
+
 /*
  * not supported here
  */
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 6709a8d..0d4c189 100644
  *     Number of bytes NOT copied.
  */
 
+#ifdef CONFIG_CPU_USE_DOMAINS
+
 #ifndef CONFIG_THUMB2_KERNEL
 #define LDR1W_SHIFT    0
 #else
 #define LDR1W_SHIFT    1
 #endif
-#define STR1W_SHIFT    0
 
        .macro ldr1w ptr reg abort
        ldrusr  \reg, \ptr, 4, abort=\abort
        ldr4w \ptr, \reg5, \reg6, \reg7, \reg8, \abort
        .endm
 
+#else
+
+#define LDR1W_SHIFT    0
+
+       .macro ldr1w ptr reg abort
+       USERL(\abort, W(ldr) \reg, [\ptr], #4)
+       .endm
+
+       .macro ldr4w ptr reg1 reg2 reg3 reg4 abort
+       USERL(\abort, ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4})
+       .endm
+
+       .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+       USERL(\abort, ldmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8})
+       .endm
+
+#endif /* CONFIG_CPU_USE_DOMAINS */
+
        .macro ldr1b ptr reg cond=al abort
        ldrusr  \reg, \ptr, 1, \cond, abort=\abort
        .endm
 
+#define STR1W_SHIFT    0
+
        .macro str1w ptr reg abort
        W(str) \reg, [\ptr], #4
        .endm
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 970abe5..97a6ff4 100644
  */
 
 #define LDR1W_SHIFT    0
-#ifndef CONFIG_THUMB2_KERNEL
-#define STR1W_SHIFT    0
-#else
-#define STR1W_SHIFT    1
-#endif
 
        .macro ldr1w ptr reg abort
        W(ldr) \reg, [\ptr], #4
        ldr\cond\()b \reg, [\ptr], #1
        .endm
 
+#ifdef CONFIG_CPU_USE_DOMAINS
+
+#ifndef CONFIG_THUMB2_KERNEL
+#define STR1W_SHIFT    0
+#else
+#define STR1W_SHIFT    1
+#endif
+
        .macro str1w ptr reg abort
        strusr  \reg, \ptr, 4, abort=\abort
        .endm
        str1w \ptr, \reg8, \abort
        .endm
 
+#else
+
+#define STR1W_SHIFT    0
+
+       .macro str1w ptr reg abort
+       USERL(\abort, W(str) \reg, [\ptr], #4)
+       .endm
+
+       .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+       USERL(\abort, stmia \ptr!, {\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8})
+       .endm
+
+#endif /* CONFIG_CPU_USE_DOMAINS */
+
        .macro str1b ptr reg cond=al abort
        strusr  \reg, \ptr, 1, \cond, abort=\abort
        .endm
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 746e780..b2e4bc3 100644
@@ -42,6 +42,12 @@ _ASM_NOKPROBE(__get_user_1)
 
 ENTRY(__get_user_2)
        check_uaccess r0, 2, r1, r2, __get_user_bad
+#if __LINUX_ARM_ARCH__ >= 6
+
+2: TUSER(ldrh) r2, [r0]
+
+#else
+
 #ifdef CONFIG_CPU_USE_DOMAINS
 rb     .req    ip
 2:     ldrbt   r2, [r0], #1
@@ -56,6 +62,9 @@ rb    .req    r0
 #else
        orr     r2, rb, r2, lsl #8
 #endif
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_2)
@@ -145,7 +154,9 @@ _ASM_NOKPROBE(__get_user_bad8)
 .pushsection __ex_table, "a"
        .long   1b, __get_user_bad
        .long   2b, __get_user_bad
+#if __LINUX_ARM_ARCH__ < 6
        .long   3b, __get_user_bad
+#endif
        .long   4b, __get_user_bad
        .long   5b, __get_user_bad8
        .long   6b, __get_user_bad8
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 38d660d..515eeaa 100644
@@ -41,16 +41,13 @@ ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
        check_uaccess r0, 2, r1, ip, __put_user_bad
-       mov     ip, r2, lsr #8
-#ifdef CONFIG_THUMB2_KERNEL
-#ifndef __ARMEB__
-2: TUSER(strb) r2, [r0]
-3: TUSER(strb) ip, [r0, #1]
+#if __LINUX_ARM_ARCH__ >= 6
+
+2: TUSER(strh) r2, [r0]
+
 #else
-2: TUSER(strb) ip, [r0]
-3: TUSER(strb) r2, [r0, #1]
-#endif
-#else  /* !CONFIG_THUMB2_KERNEL */
+
+       mov     ip, r2, lsr #8
 #ifndef __ARMEB__
 2: TUSER(strb) r2, [r0], #1
 3: TUSER(strb) ip, [r0]
@@ -58,7 +55,8 @@ ENTRY(__put_user_2)
 2: TUSER(strb) ip, [r0], #1
 3: TUSER(strb) r2, [r0]
 #endif
-#endif /* CONFIG_THUMB2_KERNEL */
+
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
        mov     r0, #0
        ret     lr
 ENDPROC(__put_user_2)
@@ -91,7 +89,9 @@ ENDPROC(__put_user_bad)
 .pushsection __ex_table, "a"
        .long   1b, __put_user_bad
        .long   2b, __put_user_bad
+#if __LINUX_ARM_ARCH__ < 6
        .long   3b, __put_user_bad
+#endif
        .long   4b, __put_user_bad
        .long   5b, __put_user_bad
        .long   6b, __put_user_bad
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 9f27b48..5e33d1a 100644
@@ -223,7 +223,6 @@ config MACH_NOKIA_N8X0
 config OMAP3_SDRC_AC_TIMING
        bool "Enable SDRC AC timing register changes"
        depends on ARCH_OMAP3
-       default n
        help
          If you know that none of your system initiators will attempt to
          access SDRAM during CORE DVFS, select Y here.  This should boost
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index fd724a7..7298d07 100644
@@ -552,7 +552,6 @@ config TOSA_BT
 config TOSA_USE_EXT_KEYCODES
        bool "Tosa keyboard: use extended keycodes"
        depends on MACH_TOSA
-       default n
        help
          Say Y here to enable the tosa keyboard driver to generate extended
          (>= 127) keycodes. Be aware, that they can't be correctly interpreted
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d130a5e..bf24690 100644
 /*
  * Faraday optimised copy_user_page
  */
-static void __naked
-fa_copy_user_page(void *kto, const void *kfrom)
+static void fa_copy_user_page(void *kto, const void *kfrom)
 {
-       asm("\
-       stmfd   sp!, {r4, lr}                   @ 2\n\
-       mov     r2, %0                          @ 1\n\
-1:     ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-       stmia   r0, {r3, r4, ip, lr}            @ 4\n\
-       mcr     p15, 0, r0, c7, c14, 1          @ 1   clean and invalidate D line\n\
-       add     r0, r0, #16                     @ 1\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-       stmia   r0, {r3, r4, ip, lr}            @ 4\n\
-       mcr     p15, 0, r0, c7, c14, 1          @ 1   clean and invalidate D line\n\
-       add     r0, r0, #16                     @ 1\n\
-       subs    r2, r2, #1                      @ 1\n\
+       int tmp;
+
+       asm volatile ("\
+1:     ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+       stmia   %0, {r3, r4, ip, lr}            @ 4\n\
+       mcr     p15, 0, %0, c7, c14, 1          @ 1   clean and invalidate D line\n\
+       add     %0, %0, #16                     @ 1\n\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+       stmia   %0, {r3, r4, ip, lr}            @ 4\n\
+       mcr     p15, 0, %0, c7, c14, 1          @ 1   clean and invalidate D line\n\
+       add     %0, %0, #16                     @ 1\n\
+       subs    %2, %2, #1                      @ 1\n\
        bne     1b                              @ 1\n\
-       mcr     p15, 0, r2, c7, c10, 4          @ 1   drain WB\n\
-       ldmfd   sp!, {r4, pc}                   @ 3"
-       :
-       : "I" (PAGE_SIZE / 32));
+       mcr     p15, 0, %2, c7, c10, 4          @ 1   drain WB"
+       : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+       : "2" (PAGE_SIZE / 32)
+       : "r3", "r4", "ip", "lr");
 }
 
 void fa_copy_user_highpage(struct page *to, struct page *from,
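This and the six copypage conversions that follow drop __naked, which GCC only supports reliably for functions containing basic asm, in favour of a plain C function whose asm statement declares its register usage. The recurring pattern: "+&r" makes the pointers read-write early-clobber operands, "=&r" (tmp) plus the matching constraint "2" (PAGE_SIZE / 32) seeds the loop counter, and every scratch register moves to the clobber list. A standalone, ARM-only sketch of the matching-constraint idiom (not kernel code):

#include <stdio.h>

/* decrement a counter to zero inside asm; "0" (n) ties the input to
 * output operand 0, exactly as "2" (PAGE_SIZE / 32) ties the page
 * count to tmp in fa_copy_user_page() above */
static int countdown(int n)
{
	int tmp;

	asm volatile (
	"1:	subs	%0, %0, #1	\n"
	"	bne	1b		\n"
	: "=&r" (tmp)
	: "0" (n)
	: "cc");

	return tmp;	/* 0 once the loop falls through */
}

int main(void)
{
	printf("%d\n", countdown(8));
	return 0;
}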
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index 49ee0c1..cc81973 100644
 #include <linux/init.h>
 #include <linux/highmem.h>
 
-static void __naked
-feroceon_copy_user_page(void *kto, const void *kfrom)
+static void feroceon_copy_user_page(void *kto, const void *kfrom)
 {
-       asm("\
-       stmfd   sp!, {r4-r9, lr}                \n\
-       mov     ip, %2                          \n\
-1:     mov     lr, r1                          \n\
-       ldmia   r1!, {r2 - r9}                  \n\
-       pld     [lr, #32]                       \n\
-       pld     [lr, #64]                       \n\
-       pld     [lr, #96]                       \n\
-       pld     [lr, #128]                      \n\
-       pld     [lr, #160]                      \n\
-       pld     [lr, #192]                      \n\
-       pld     [lr, #224]                      \n\
-       stmia   r0, {r2 - r9}                   \n\
-       ldmia   r1!, {r2 - r9}                  \n\
-       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-       add     r0, r0, #32                     \n\
-       stmia   r0, {r2 - r9}                   \n\
-       ldmia   r1!, {r2 - r9}                  \n\
-       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-       add     r0, r0, #32                     \n\
-       stmia   r0, {r2 - r9}                   \n\
-       ldmia   r1!, {r2 - r9}                  \n\
-       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-       add     r0, r0, #32                     \n\
-       stmia   r0, {r2 - r9}                   \n\
-       ldmia   r1!, {r2 - r9}                  \n\
-       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-       add     r0, r0, #32                     \n\
-       stmia   r0, {r2 - r9}                   \n\
-       ldmia   r1!, {r2 - r9}                  \n\
-       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-       add     r0, r0, #32                     \n\
-       stmia   r0, {r2 - r9}                   \n\
-       ldmia   r1!, {r2 - r9}                  \n\
-       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-       add     r0, r0, #32                     \n\
-       stmia   r0, {r2 - r9}                   \n\
-       ldmia   r1!, {r2 - r9}                  \n\
-       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-       add     r0, r0, #32                     \n\
-       stmia   r0, {r2 - r9}                   \n\
-       subs    ip, ip, #(32 * 8)               \n\
-       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line\n\
-       add     r0, r0, #32                     \n\
+       int tmp;
+
+       asm volatile ("\
+1:     ldmia   %1!, {r2 - r7, ip, lr}          \n\
+       pld     [%1, #0]                        \n\
+       pld     [%1, #32]                       \n\
+       pld     [%1, #64]                       \n\
+       pld     [%1, #96]                       \n\
+       pld     [%1, #128]                      \n\
+       pld     [%1, #160]                      \n\
+       pld     [%1, #192]                      \n\
+       stmia   %0, {r2 - r7, ip, lr}           \n\
+       ldmia   %1!, {r2 - r7, ip, lr}          \n\
+       mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+       add     %0, %0, #32                     \n\
+       stmia   %0, {r2 - r7, ip, lr}           \n\
+       ldmia   %1!, {r2 - r7, ip, lr}          \n\
+       mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+       add     %0, %0, #32                     \n\
+       stmia   %0, {r2 - r7, ip, lr}           \n\
+       ldmia   %1!, {r2 - r7, ip, lr}          \n\
+       mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+       add     %0, %0, #32                     \n\
+       stmia   %0, {r2 - r7, ip, lr}           \n\
+       ldmia   %1!, {r2 - r7, ip, lr}          \n\
+       mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+       add     %0, %0, #32                     \n\
+       stmia   %0, {r2 - r7, ip, lr}           \n\
+       ldmia   %1!, {r2 - r7, ip, lr}          \n\
+       mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+       add     %0, %0, #32                     \n\
+       stmia   %0, {r2 - r7, ip, lr}           \n\
+       ldmia   %1!, {r2 - r7, ip, lr}          \n\
+       mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+       add     %0, %0, #32                     \n\
+       stmia   %0, {r2 - r7, ip, lr}           \n\
+       ldmia   %1!, {r2 - r7, ip, lr}          \n\
+       mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+       add     %0, %0, #32                     \n\
+       stmia   %0, {r2 - r7, ip, lr}           \n\
+       subs    %2, %2, #(32 * 8)               \n\
+       mcr     p15, 0, %0, c7, c14, 1          @ clean and invalidate D line\n\
+       add     %0, %0, #32                     \n\
        bne     1b                              \n\
-       mcr     p15, 0, ip, c7, c10, 4          @ drain WB\n\
-       ldmfd   sp!, {r4-r9, pc}"
-       :
-       : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE));
+       mcr     p15, 0, %2, c7, c10, 4          @ drain WB"
+       : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+       : "2" (PAGE_SIZE)
+       : "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
 }
 
 void feroceon_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 0224416..b03202c 100644
@@ -40,12 +40,11 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
  * instruction.  If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void mc_copy_user_page(void *from, void *to)
 {
-       asm volatile(
-       "stmfd  sp!, {r4, lr}                   @ 2\n\
-       mov     r4, %2                          @ 1\n\
+       int tmp;
+
+       asm volatile ("\
        ldmia   %0!, {r2, r3, ip, lr}           @ 4\n\
 1:     mcr     p15, 0, %1, c7, c6, 1           @ 1   invalidate D line\n\
        stmia   %1!, {r2, r3, ip, lr}           @ 4\n\
@@ -55,13 +54,13 @@ mc_copy_user_page(void *from, void *to)
        mcr     p15, 0, %1, c7, c6, 1           @ 1   invalidate D line\n\
        stmia   %1!, {r2, r3, ip, lr}           @ 4\n\
        ldmia   %0!, {r2, r3, ip, lr}           @ 4\n\
-       subs    r4, r4, #1                      @ 1\n\
+       subs    %2, %2, #1                      @ 1\n\
        stmia   %1!, {r2, r3, ip, lr}           @ 4\n\
        ldmneia %0!, {r2, r3, ip, lr}           @ 4\n\
-       bne     1b                              @ 1\n\
-       ldmfd   sp!, {r4, pc}                   @ 3"
-       :
-       : "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
+       bne     1b                              @ "
+       : "+&r" (from), "+&r" (to), "=&r" (tmp)
+       : "2" (PAGE_SIZE / 64)
+       : "r2", "r3", "ip", "lr");
 }
 
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 067d0fd..cd3e165 100644
  * instruction.  If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __naked
-v4wb_copy_user_page(void *kto, const void *kfrom)
+static void v4wb_copy_user_page(void *kto, const void *kfrom)
 {
-       asm("\
-       stmfd   sp!, {r4, lr}                   @ 2\n\
-       mov     r2, %2                          @ 1\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-1:     mcr     p15, 0, r0, c7, c6, 1           @ 1   invalidate D line\n\
-       stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4+1\n\
-       stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-       mcr     p15, 0, r0, c7, c6, 1           @ 1   invalidate D line\n\
-       stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-       subs    r2, r2, #1                      @ 1\n\
-       stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmneia r1!, {r3, r4, ip, lr}           @ 4\n\
+       int tmp;
+
+       asm volatile ("\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+1:     mcr     p15, 0, %0, c7, c6, 1           @ 1   invalidate D line\n\
+       stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4+1\n\
+       stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+       mcr     p15, 0, %0, c7, c6, 1           @ 1   invalidate D line\n\
+       stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+       subs    %2, %2, #1                      @ 1\n\
+       stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmneia %1!, {r3, r4, ip, lr}           @ 4\n\
        bne     1b                              @ 1\n\
-       mcr     p15, 0, r1, c7, c10, 4          @ 1   drain WB\n\
-       ldmfd    sp!, {r4, pc}                  @ 3"
-       :
-       : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+       mcr     p15, 0, %1, c7, c10, 4          @ 1   drain WB"
+       : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+       : "2" (PAGE_SIZE / 64)
+       : "r3", "r4", "ip", "lr");
 }
 
 void v4wb_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index b85c5da..8614572 100644
  * dirty data in the cache.  However, we do have to ensure that
  * subsequent reads are up to date.
  */
-static void __naked
-v4wt_copy_user_page(void *kto, const void *kfrom)
+static void v4wt_copy_user_page(void *kto, const void *kfrom)
 {
-       asm("\
-       stmfd   sp!, {r4, lr}                   @ 2\n\
-       mov     r2, %2                          @ 1\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-1:     stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4+1\n\
-       stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-       stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmia   r1!, {r3, r4, ip, lr}           @ 4\n\
-       subs    r2, r2, #1                      @ 1\n\
-       stmia   r0!, {r3, r4, ip, lr}           @ 4\n\
-       ldmneia r1!, {r3, r4, ip, lr}           @ 4\n\
+       int tmp;
+
+       asm volatile ("\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+1:     stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4+1\n\
+       stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+       stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmia   %1!, {r3, r4, ip, lr}           @ 4\n\
+       subs    %2, %2, #1                      @ 1\n\
+       stmia   %0!, {r3, r4, ip, lr}           @ 4\n\
+       ldmneia %1!, {r3, r4, ip, lr}           @ 4\n\
        bne     1b                              @ 1\n\
-       mcr     p15, 0, r2, c7, c7, 0           @ flush ID cache\n\
-       ldmfd   sp!, {r4, pc}                   @ 3"
-       :
-       : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64));
+       mcr     p15, 0, %2, c7, c7, 0           @ flush ID cache"
+       : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+       : "2" (PAGE_SIZE / 64)
+       : "r3", "r4", "ip", "lr");
 }
 
 void v4wt_copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 03a2042..a081582 100644
 
 /*
  * XSC3 optimised copy_user_highpage
- *  r0 = destination
- *  r1 = source
  *
  * The source page may have some clean entries in the cache already, but we
  * can safely ignore them - break_cow() will flush them out of the cache
  * if we eventually end up using our copied page.
  *
  */
-static void __naked
-xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
-       asm("\
-       stmfd   sp!, {r4, r5, lr}               \n\
-       mov     lr, %2                          \n\
-                                               \n\
-       pld     [r1, #0]                        \n\
-       pld     [r1, #32]                       \n\
-1:     pld     [r1, #64]                       \n\
-       pld     [r1, #96]                       \n\
+       int tmp;
+
+       asm volatile ("\
+       pld     [%1, #0]                        \n\
+       pld     [%1, #32]                       \n\
+1:     pld     [%1, #64]                       \n\
+       pld     [%1, #96]                       \n\
                                                \n\
-2:     ldrd    r2, [r1], #8                    \n\
-       mov     ip, r0                          \n\
-       ldrd    r4, [r1], #8                    \n\
-       mcr     p15, 0, ip, c7, c6, 1           @ invalidate\n\
-       strd    r2, [r0], #8                    \n\
-       ldrd    r2, [r1], #8                    \n\
-       strd    r4, [r0], #8                    \n\
-       ldrd    r4, [r1], #8                    \n\
-       strd    r2, [r0], #8                    \n\
-       strd    r4, [r0], #8                    \n\
-       ldrd    r2, [r1], #8                    \n\
-       mov     ip, r0                          \n\
-       ldrd    r4, [r1], #8                    \n\
-       mcr     p15, 0, ip, c7, c6, 1           @ invalidate\n\
-       strd    r2, [r0], #8                    \n\
-       ldrd    r2, [r1], #8                    \n\
-       subs    lr, lr, #1                      \n\
-       strd    r4, [r0], #8                    \n\
-       ldrd    r4, [r1], #8                    \n\
-       strd    r2, [r0], #8                    \n\
-       strd    r4, [r0], #8                    \n\
+2:     ldrd    r2, r3, [%1], #8                \n\
+       ldrd    r4, r5, [%1], #8                \n\
+       mcr     p15, 0, %0, c7, c6, 1           @ invalidate\n\
+       strd    r2, r3, [%0], #8                \n\
+       ldrd    r2, r3, [%1], #8                \n\
+       strd    r4, r5, [%0], #8                \n\
+       ldrd    r4, r5, [%1], #8                \n\
+       strd    r2, r3, [%0], #8                \n\
+       strd    r4, r5, [%0], #8                \n\
+       ldrd    r2, r3, [%1], #8                \n\
+       ldrd    r4, r5, [%1], #8                \n\
+       mcr     p15, 0, %0, c7, c6, 1           @ invalidate\n\
+       strd    r2, r3, [%0], #8                \n\
+       ldrd    r2, r3, [%1], #8                \n\
+       subs    %2, %2, #1                      \n\
+       strd    r4, r5, [%0], #8                \n\
+       ldrd    r4, r5, [%1], #8                \n\
+       strd    r2, r3, [%0], #8                \n\
+       strd    r4, r5, [%0], #8                \n\
        bgt     1b                              \n\
-       beq     2b                              \n\
-                                               \n\
-       ldmfd   sp!, {r4, r5, pc}"
-       :
-       : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
+       beq     2b                              "
+       : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+       : "2" (PAGE_SIZE / 64 - 1)
+       : "r2", "r3", "r4", "r5");
 }
 
 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
@@ -85,8 +78,6 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 
 /*
  * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
@@ -96,10 +87,10 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
        mov     r2, #0                          \n\
        mov     r3, #0                          \n\
 1:     mcr     p15, 0, %0, c7, c6, 1           @ invalidate line\n\
-       strd    r2, [%0], #8                    \n\
-       strd    r2, [%0], #8                    \n\
-       strd    r2, [%0], #8                    \n\
-       strd    r2, [%0], #8                    \n\
+       strd    r2, r3, [%0], #8                \n\
+       strd    r2, r3, [%0], #8                \n\
+       strd    r2, r3, [%0], #8                \n\
+       strd    r2, r3, [%0], #8                \n\
        subs    r1, r1, #1                      \n\
        bne     1b"
        : "=r" (ptr)
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9797237..63b9219 100644
@@ -36,52 +36,51 @@ static DEFINE_RAW_SPINLOCK(minicache_lock);
  * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
  * and merged as appropriate.
  */
-static void __naked
-mc_copy_user_page(void *from, void *to)
+static void mc_copy_user_page(void *from, void *to)
 {
+       int tmp;
+
        /*
         * Strangely enough, best performance is achieved
         * when prefetching destination as well.  (NP)
         */
-       asm volatile(
-       "stmfd  sp!, {r4, r5, lr}               \n\
-       mov     lr, %2                          \n\
-       pld     [r0, #0]                        \n\
-       pld     [r0, #32]                       \n\
-       pld     [r1, #0]                        \n\
-       pld     [r1, #32]                       \n\
-1:     pld     [r0, #64]                       \n\
-       pld     [r0, #96]                       \n\
-       pld     [r1, #64]                       \n\
-       pld     [r1, #96]                       \n\
-2:     ldrd    r2, [r0], #8                    \n\
-       ldrd    r4, [r0], #8                    \n\
-       mov     ip, r1                          \n\
-       strd    r2, [r1], #8                    \n\
-       ldrd    r2, [r0], #8                    \n\
-       strd    r4, [r1], #8                    \n\
-       ldrd    r4, [r0], #8                    \n\
-       strd    r2, [r1], #8                    \n\
-       strd    r4, [r1], #8                    \n\
+       asm volatile ("\
+       pld     [%0, #0]                        \n\
+       pld     [%0, #32]                       \n\
+       pld     [%1, #0]                        \n\
+       pld     [%1, #32]                       \n\
+1:     pld     [%0, #64]                       \n\
+       pld     [%0, #96]                       \n\
+       pld     [%1, #64]                       \n\
+       pld     [%1, #96]                       \n\
+2:     ldrd    r2, r3, [%0], #8                \n\
+       ldrd    r4, r5, [%0], #8                \n\
+       mov     ip, %1                          \n\
+       strd    r2, r3, [%1], #8                \n\
+       ldrd    r2, r3, [%0], #8                \n\
+       strd    r4, r5, [%1], #8                \n\
+       ldrd    r4, r5, [%0], #8                \n\
+       strd    r2, r3, [%1], #8                \n\
+       strd    r4, r5, [%1], #8                \n\
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
-       ldrd    r2, [r0], #8                    \n\
+       ldrd    r2, r3, [%0], #8                \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
-       ldrd    r4, [r0], #8                    \n\
-       mov     ip, r1                          \n\
-       strd    r2, [r1], #8                    \n\
-       ldrd    r2, [r0], #8                    \n\
-       strd    r4, [r1], #8                    \n\
-       ldrd    r4, [r0], #8                    \n\
-       strd    r2, [r1], #8                    \n\
-       strd    r4, [r1], #8                    \n\
+       ldrd    r4, r5, [%0], #8                \n\
+       mov     ip, %1                          \n\
+       strd    r2, r3, [%1], #8                \n\
+       ldrd    r2, r3, [%0], #8                \n\
+       strd    r4, r5, [%1], #8                \n\
+       ldrd    r4, r5, [%0], #8                \n\
+       strd    r2, r3, [%1], #8                \n\
+       strd    r4, r5, [%1], #8                \n\
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
-       subs    lr, lr, #1                      \n\
+       subs    %2, %2, #1                      \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
        bgt     1b                              \n\
-       beq     2b                              \n\
-       ldmfd   sp!, {r4, r5, pc}               "
-       :
-       : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
+       beq     2b                              "
+       : "+&r" (from), "+&r" (to), "=&r" (tmp)
+       : "2" (PAGE_SIZE / 64 - 1)
+       : "r2", "r3", "r4", "r5", "ip");
 }
 
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
@@ -115,10 +114,10 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
        mov     r2, #0                          \n\
        mov     r3, #0                          \n\
 1:     mov     ip, %0                          \n\
-       strd    r2, [%0], #8                    \n\
-       strd    r2, [%0], #8                    \n\
-       strd    r2, [%0], #8                    \n\
-       strd    r2, [%0], #8                    \n\
+       strd    r2, r3, [%0], #8                \n\
+       strd    r2, r3, [%0], #8                \n\
+       strd    r2, r3, [%0], #8                \n\
+       strd    r2, r3, [%0], #8                \n\
        mcr     p15, 0, ip, c7, c10, 1          @ clean D line\n\
        subs    r1, r1, #1                      \n\
        mcr     p15, 0, ip, c7, c6, 1           @ invalidate D line\n\
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index f4ea4c6..58f69fa 100644
@@ -173,6 +173,12 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
                show_regs(regs);
        }
 #endif
+#ifndef CONFIG_KUSER_HELPERS
+       if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
+               printk_ratelimited(KERN_DEBUG
+                                  "%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
+                                  tsk->comm, addr);
+#endif
 
        tsk->thread.address = addr;
        tsk->thread.error_code = fsr;
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 81d0efb..5461d58 100644
        .endm
 
 .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+/*
+ * If we are building for big.LITTLE with branch predictor hardening,
+ * we need the processor function tables to remain available after boot.
+ */
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+       .section ".rodata"
+#endif
        .type   \name\()_processor_functions, #object
        .align 2
 ENTRY(\name\()_processor_functions)
@@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
        .endif
 
        .size   \name\()_processor_functions, . - \name\()_processor_functions
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+       .previous
+#endif
 .endm
 
 .macro define_cache_functions name:req
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 5544b82..9a07916 100644
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
        case ARM_CPU_PART_CORTEX_A17:
        case ARM_CPU_PART_CORTEX_A73:
        case ARM_CPU_PART_CORTEX_A75:
-               if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-                       goto bl_error;
                per_cpu(harden_branch_predictor_fn, cpu) =
                        harden_branch_predictor_bpiall;
                spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
 
        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_BRAHMA_B15:
-               if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-                       goto bl_error;
                per_cpu(harden_branch_predictor_fn, cpu) =
                        harden_branch_predictor_iciallu;
                spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
                                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                        if ((int)res.a0 != 0)
                                break;
-                       if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-                               goto bl_error;
                        per_cpu(harden_branch_predictor_fn, cpu) =
                                call_hvc_arch_workaround_1;
-                       processor.switch_mm = cpu_v7_hvc_switch_mm;
+                       cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
                        spectre_v2_method = "hypervisor";
                        break;
 
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
                                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                        if ((int)res.a0 != 0)
                                break;
-                       if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-                               goto bl_error;
                        per_cpu(harden_branch_predictor_fn, cpu) =
                                call_smc_arch_workaround_1;
-                       processor.switch_mm = cpu_v7_smc_switch_mm;
+                       cpu_do_switch_mm = cpu_v7_smc_switch_mm;
                        spectre_v2_method = "firmware";
                        break;
 
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
        if (spectre_v2_method)
                pr_info("CPU%u: Spectre v2: using %s workaround\n",
                        smp_processor_id(), spectre_v2_method);
-       return;
-
-bl_error:
-       pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-               cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)
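The deleted bl_error checks existed because a single global processor.switch_mm was shared by all CPUs, so on a mixed big.LITTLE system the code could only detect the mismatch and report "system vulnerable". With the assignment now going through cpu_do_switch_mm, which on big.LITTLE with hardening expands to cpu_vtable[smp_processor_id()]->switch_mm (see the proc-fns.h change above), each CPU installs the workaround in its own vtable and the cross-CPU consistency check is no longer needed.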
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6fe5281..339eb17 100644
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm)
        hvc     #0
        ldmfd   sp!, {r0 - r3}
        b       cpu_v7_switch_mm
-ENDPROC(cpu_v7_smc_switch_mm)
+ENDPROC(cpu_v7_hvc_switch_mm)
 #endif
 ENTRY(cpu_v7_iciallu_switch_mm)
        mov     r3, #0
diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S
index 1867f3e..fd2ff90 100644
@@ -33,10 +33,10 @@ ENTRY(lpae_pgtables_remap_asm)
        add     r7, r2, #0x1000
        add     r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
        add     r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
-1:     ldrd    r4, [r7]
+1:     ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
-       strd    r4, [r7], #1 << L2_ORDER
+       strd    r4, r5, [r7], #1 << L2_ORDER
        cmp     r7, r6
        bls     1b
 
@@ -44,22 +44,22 @@ ENTRY(lpae_pgtables_remap_asm)
        add     r7, r2, #0x1000
        add     r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
        bic     r7, r7, #(1 << L2_ORDER) - 1
-       ldrd    r4, [r7]
+       ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
-       strd    r4, [r7], #1 << L2_ORDER
-       ldrd    r4, [r7]
+       strd    r4, r5, [r7], #1 << L2_ORDER
+       ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
-       strd    r4, [r7]
+       strd    r4, r5, [r7]
 
        /* Update level 1 entries */
        mov     r6, #4
        mov     r7, r2
-2:     ldrd    r4, [r7]
+2:     ldrd    r4, r5, [r7]
        adds    r4, r4, r0
        adc     r5, r5, r1
-       strd    r4, [r7], #1 << L1_ORDER
+       strd    r4, r5, [r7], #1 << L1_ORDER
        subs    r6, r6, #1
        bne     2b
 
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index c0a242c..93fd7fc 100644
@@ -92,7 +92,6 @@ config OMAP_32K_TIMER
 config OMAP3_L2_AUX_SECURE_SAVE_RESTORE
        bool "OMAP3 HS/EMU save and restore for L2 AUX control register"
        depends on ARCH_OMAP3 && PM
-       default n
        help
          Without this option, L2 Auxiliary control register contents are
          lost during off-mode entry on HS/EMU devices. This feature
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index aff6e6e..ee7b079 100644
@@ -573,7 +573,7 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
         */
        ufp_exc->fpexc = hwstate->fpexc;
        ufp_exc->fpinst = hwstate->fpinst;
-       ufp_exc->fpinst2 = ufp_exc->fpinst2;
+       ufp_exc->fpinst2 = hwstate->fpinst2;
 
        /* Ensure that VFP is disabled. */
        vfp_flush_hwstate(thread);