Merge tag 'timers-urgent-2020-12-27' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 99035b5..2f841cb 100644
@@ -18,6 +18,7 @@
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
 #endif
+#include <asm/kasan_def.h>
 
 /* PAGE_OFFSET - the virtual address of the start of the kernel image */
 #define PAGE_OFFSET            UL(CONFIG_PAGE_OFFSET)
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
+#ifndef CONFIG_KASAN
 #define TASK_SIZE              (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
+#else
+#define TASK_SIZE              (KASAN_SHADOW_START)
+#endif
 #define TASK_UNMAPPED_BASE     ALIGN(TASK_SIZE / 3, SZ_16M)
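
A minimal sketch (not part of the patch) of what the KASAN case above means for user space, using assumed values: CONFIG_PAGE_OFFSET = 0xC0000000 and a shadow covering 1/8 of the 32-bit address space (512 MiB). The shadow start below is hypothetical; the point is only that KASAN_SHADOW_START, rather than PAGE_OFFSET - 16 MiB, now bounds user space.

	/* Illustration only; all values are assumptions, not taken from kasan_def.h. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long page_offset = 0xC0000000UL;               /* assumed CONFIG_PAGE_OFFSET */
		unsigned long task_size_nokasan = page_offset - (16UL << 20);
		unsigned long shadow_size = 1UL << (32 - 3);             /* 1/8 of 4 GiB = 512 MiB */
		unsigned long shadow_start = task_size_nokasan - shadow_size;  /* hypothetical placement */

		printf("TASK_SIZE without KASAN: %#lx\n", task_size_nokasan);  /* 0xbf000000 */
		printf("TASK_SIZE with KASAN:    %#lx\n", shadow_start);       /* 0x9f000000 */
		return 0;
	}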
 
 /*
  */
 #define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
 
+#define FDT_FIXED_BASE         UL(0xff800000)
+#define FDT_FIXED_SIZE         (2 * SECTION_SIZE)
+#define FDT_VIRT_BASE(physbase)        ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
+
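
A small worked example (not part of the patch) of what FDT_VIRT_BASE() computes: the DTB keeps its offset within its section but is relocated into the fixed window at FDT_FIXED_BASE. The SECTION_SIZE of 0x100000 (1 MiB, classic page tables; 2 MiB with LPAE) and the DTB physical address are assumptions.

	#include <stdio.h>

	#define EX_FDT_FIXED_BASE  0xff800000UL   /* FDT_FIXED_BASE from above */
	#define EX_SECTION_SIZE    0x00100000UL   /* assumed: 1 MiB sections */

	int main(void)
	{
		unsigned long physbase = 0x4fb76000UL;  /* hypothetical DTB location */
		unsigned long virt = EX_FDT_FIXED_BASE | (physbase % EX_SECTION_SIZE);

		/* prints 0xff876000: same offset within the section, fixed base */
		printf("FDT_VIRT_BASE(%#lx) -> %#lx\n", physbase, virt);
		return 0;
	}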
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Allow 16MB-aligned ioremap pages
@@ -107,6 +116,7 @@ extern unsigned long vectors_base;
 #define MODULES_VADDR          PAGE_OFFSET
 
 #define XIP_VIRT_ADDR(physaddr)  (physaddr)
+#define FDT_VIRT_BASE(physbase)  ((void *)(physbase))
 
 #endif /* !CONFIG_MMU */
 
@@ -173,6 +183,7 @@ extern unsigned long vectors_base;
  * so that all we need to do is modify the 8-bit constant field.
  */
 #define __PV_BITS_31_24        0x81000000
+#define __PV_BITS_23_16        0x810000
 #define __PV_BITS_7_0  0x81
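
A short aside (not part of the patch) on why these particular constants work here: an ARM data-processing immediate is an 8-bit value rotated right by an even amount, so each constant fits a single instruction and, as the comment above says, the patcher only has to rewrite the 8-bit constant field. A quick check of the encodings:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t ror32(uint32_t v, unsigned int r)   /* rotate right by r bits */
	{
		return r ? (v >> r) | (v << (32 - r)) : v;
	}

	int main(void)
	{
		printf("0x%08x\n", (unsigned int)ror32(0x81, 8));   /* 0x81000000 == __PV_BITS_31_24 */
		printf("0x%08x\n", (unsigned int)ror32(0x81, 16));  /* 0x00810000 == __PV_BITS_23_16 */
		printf("0x%08x\n", (unsigned int)ror32(0x81, 0));   /* 0x00000081 == __PV_BITS_7_0 */
		return 0;
	}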
 
 extern unsigned long __pv_phys_pfn_offset;
@@ -183,43 +194,65 @@ extern const void *__pv_table_begin, *__pv_table_end;
 #define PHYS_OFFSET    ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
 #define PHYS_PFN_OFFSET        (__pv_phys_pfn_offset)
 
-#define __pv_stub(from,to,instr,type)                  \
+#ifndef CONFIG_THUMB2_KERNEL
+#define __pv_stub(from,to,instr)                       \
        __asm__("@ __pv_stub\n"                         \
        "1:     " instr "       %0, %1, %2\n"           \
+       "2:     " instr "       %0, %0, %3\n"           \
        "       .pushsection .pv_table,\"a\"\n"         \
-       "       .long   1b\n"                           \
+       "       .long   1b - ., 2b - .\n"               \
        "       .popsection\n"                          \
        : "=r" (to)                                     \
-       : "r" (from), "I" (type))
+       : "r" (from), "I" (__PV_BITS_31_24),            \
+         "I"(__PV_BITS_23_16))
 
-#define __pv_stub_mov_hi(t)                            \
-       __asm__ volatile("@ __pv_stub_mov\n"            \
-       "1:     mov     %R0, %1\n"                      \
+#define __pv_add_carry_stub(x, y)                      \
+       __asm__("@ __pv_add_carry_stub\n"               \
+       "0:     movw    %R0, #0\n"                      \
+       "       adds    %Q0, %1, %R0, lsl #20\n"        \
+       "1:     mov     %R0, %2\n"                      \
+       "       adc     %R0, %R0, #0\n"                 \
        "       .pushsection .pv_table,\"a\"\n"         \
-       "       .long   1b\n"                           \
+       "       .long   0b - ., 1b - .\n"               \
        "       .popsection\n"                          \
-       : "=r" (t)                                      \
-       : "I" (__PV_BITS_7_0))
+       : "=&r" (y)                                     \
+       : "r" (x), "I" (__PV_BITS_7_0)                  \
+       : "cc")
+
+#else
+#define __pv_stub(from,to,instr)                       \
+       __asm__("@ __pv_stub\n"                         \
+       "0:     movw    %0, #0\n"                       \
+       "       lsl     %0, #21\n"                      \
+       "       " instr " %0, %1, %0\n"                 \
+       "       .pushsection .pv_table,\"a\"\n"         \
+       "       .long   0b - .\n"                       \
+       "       .popsection\n"                          \
+       : "=&r" (to)                                    \
+       : "r" (from))
 
 #define __pv_add_carry_stub(x, y)                      \
-       __asm__ volatile("@ __pv_add_carry_stub\n"      \
-       "1:     adds    %Q0, %1, %2\n"                  \
+       __asm__("@ __pv_add_carry_stub\n"               \
+       "0:     movw    %R0, #0\n"                      \
+       "       lsls    %R0, #21\n"                     \
+       "       adds    %Q0, %1, %R0\n"                 \
+       "1:     mvn     %R0, #0\n"                      \
        "       adc     %R0, %R0, #0\n"                 \
        "       .pushsection .pv_table,\"a\"\n"         \
-       "       .long   1b\n"                           \
+       "       .long   0b - ., 1b - .\n"               \
        "       .popsection\n"                          \
-       : "+r" (y)                                      \
-       : "r" (x), "I" (__PV_BITS_31_24)                \
+       : "=&r" (y)                                     \
+       : "r" (x)                                       \
        : "cc")
+#endif
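
For orientation (not part of the patch): the .pv_table entries above now store place-relative offsets ("label - .") rather than absolute addresses, so the table stays valid wherever the code is loaded. A hypothetical helper showing how such an entry is turned back into the address of the instruction it describes:

	#include <stdint.h>
	#include <stdio.h>

	/* A place-relative entry holds the signed distance from the entry
	 * itself to the instruction it refers to ("label - .").
	 */
	static void *pv_entry_to_insn(int32_t *entry)
	{
		return (char *)entry + *entry;
	}

	int main(void)
	{
		int32_t insn = 0;   /* stand-in for a patched instruction */
		int32_t entry = (int32_t)((char *)&insn - (char *)&entry);   /* "insn - ." */

		printf("round-trips: %d\n", pv_entry_to_insn(&entry) == (void *)&insn);
		return 0;
	}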
 
 static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
 {
        phys_addr_t t;
 
        if (sizeof(phys_addr_t) == 4) {
-               __pv_stub(x, t, "add", __PV_BITS_31_24);
+               __pv_stub(x, t, "add");
        } else {
-               __pv_stub_mov_hi(t);
                __pv_add_carry_stub(x, t);
        }
        return t;
@@ -235,7 +268,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
         * assembler expression receives 32 bit argument
         * in place where 'r' 32 bit operand is expected.
         */
-       __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
+       __pv_stub((unsigned long) x, t, "sub");
        return t;
 }
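
To round things off (not part of the patch): once the .pv_table entries have been patched at boot, each stub above amounts to constant offset arithmetic between the linear virtual mapping and physical RAM, done entirely in registers. A sketch with assumed offsets:

	#include <stdio.h>

	#define EX_PAGE_OFFSET 0xC0000000UL   /* assumed CONFIG_PAGE_OFFSET */
	#define EX_PHYS_OFFSET 0x80200000UL   /* assumed RAM base, 2 MiB aligned */

	static unsigned long ex_virt_to_phys(unsigned long vaddr)
	{
		return vaddr - EX_PAGE_OFFSET + EX_PHYS_OFFSET;
	}

	static unsigned long ex_phys_to_virt(unsigned long paddr)
	{
		return paddr - EX_PHYS_OFFSET + EX_PAGE_OFFSET;
	}

	int main(void)
	{
		printf("%#lx\n", ex_virt_to_phys(0xC0100000UL));  /* 0x80300000 */
		printf("%#lx\n", ex_phys_to_virt(0x80300000UL));  /* 0xc0100000 */
		return 0;
	}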