Merge tag 's390-5.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 14 Dec 2019 20:34:37 +0000 (12:34 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 14 Dec 2019 20:34:37 +0000 (12:34 -0800)
Pull s390 updates from Vasily Gorbik:

 - Add support for the KASAN_VMALLOC feature.

 - Remove the last user of the problematic diag 0x44 call.

 - Adjust the sampling interval and avoid the sample-data-block overflow
   condition under pressure in the perf code.

 - Prefer EOPNOTSUPP over ENOTSUPP, plus comment fixes.

* tag 's390-5.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/kasan: add KASAN_VMALLOC support
  s390: remove last diag 0x44 caller
  s390/uv: use EOPNOTSUPP instead of ENOTSUPP
  s390/cpum_sf: Avoid SBD overflow condition in irq handler
  s390/cpum_sf: Adjust sampling interval to avoid hitting sample limits
  s390/test_unwind: fix spelling mistake "reqister" -> "register"
  s390/spinlock: remove confusing comment in arch_spin_lock_wait

arch/s390/Kconfig
arch/s390/include/asm/setup.h
arch/s390/include/asm/uv.h
arch/s390/kernel/early.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/smp.c
arch/s390/lib/spinlock.c
arch/s390/lib/test_unwind.c
arch/s390/mm/kasan_init.c

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d4051e8..bc88841 100644
@@ -124,6 +124,7 @@ config S390
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN
+       select HAVE_ARCH_KASAN_VMALLOC
        select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 6dc6c4f..69289e9 100644
@@ -27,7 +27,6 @@
 #define MACHINE_FLAG_DIAG9C    BIT(3)
 #define MACHINE_FLAG_ESOP      BIT(4)
 #define MACHINE_FLAG_IDTE      BIT(5)
-#define MACHINE_FLAG_DIAG44    BIT(6)
 #define MACHINE_FLAG_EDAT1     BIT(7)
 #define MACHINE_FLAG_EDAT2     BIT(8)
 #define MACHINE_FLAG_TOPOLOGY  BIT(10)
@@ -94,7 +93,6 @@ extern unsigned long __swsusp_reset_dma;
 #define MACHINE_HAS_DIAG9C     (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
 #define MACHINE_HAS_ESOP       (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
 #define MACHINE_HAS_IDTE       (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
-#define MACHINE_HAS_DIAG44     (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
 #define MACHINE_HAS_EDAT1      (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
 #define MACHINE_HAS_EDAT2      (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
 #define MACHINE_HAS_TOPOLOGY   (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index ef3c00b..4093a28 100644
@@ -86,7 +86,7 @@ static inline int share(unsigned long addr, u16 cmd)
        };
 
        if (!is_prot_virt_guest())
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        /*
         * Sharing is page wise, if we encounter addresses that are
         * not page aligned, we assume something went wrong. If
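
The ENOTSUPP -> EOPNOTSUPP change above matters because ENOTSUPP (524) is a
kernel-internal error code with no userspace definition, while EOPNOTSUPP (95)
is a standard errno that C libraries can decode. A minimal userspace sketch
(illustration only, not part of the patch) of the visible difference:

	/*
	 * Illustration only: strerror() cannot decode the kernel-internal
	 * ENOTSUPP value (524), but resolves EOPNOTSUPP (95) normally.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
		printf("524 (kernel ENOTSUPP): %s\n", strerror(524));
		return 0;
	}
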
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index db32a55..cd241ee 100644
@@ -204,21 +204,6 @@ static __init void detect_diag9c(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
 }
 
-static __init void detect_diag44(void)
-{
-       int rc;
-
-       diag_stat_inc(DIAG_STAT_X044);
-       asm volatile(
-               "       diag    0,0,0x44\n"
-               "0:     la      %0,0\n"
-               "1:\n"
-               EX_TABLE(0b,1b)
-               : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
-       if (!rc)
-               S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
-}
-
 static __init void detect_machine_facilities(void)
 {
        if (test_facility(8)) {
@@ -331,7 +316,6 @@ void __init startup_init(void)
        setup_arch_string();
        setup_boot_command_line();
        detect_diag9c();
-       detect_diag44();
        detect_machine_facilities();
        save_vector_registers();
        setup_topology();
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c07fdcd..77d93c5 100644
@@ -1303,18 +1303,28 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
                 */
                if (flush_all && done)
                        break;
-
-               /* If an event overflow happened, discard samples by
-                * processing any remaining sample-data-blocks.
-                */
-               if (event_overflow)
-                       flush_all = 1;
        }
 
        /* Account sample overflows in the event hardware structure */
        if (sampl_overflow)
                OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
                                                 sampl_overflow, 1 + num_sdb);
+
+       /* Perf_event_overflow() and perf_event_account_interrupt() limit
+        * the interrupt rate to an upper limit. Roughly 1000 samples per
+        * task tick.
+        * Hitting this limit results in a large number
+        * of throttled REF_REPORT_THROTTLE entries and the samples
+        * are dropped.
+        * Slightly increase the interval to avoid hitting this limit.
+        */
+       if (event_overflow) {
+               SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
+               debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
+                                   __func__,
+                                   DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
+       }
+
        if (sampl_overflow || event_overflow)
                debug_sprintf_event(sfdbg, 4, "%s: "
                                    "overflows: sample %llu event %llu"
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2794cad..a08bd25 100644
@@ -413,14 +413,11 @@ EXPORT_SYMBOL(arch_vcpu_is_preempted);
 
 void smp_yield_cpu(int cpu)
 {
-       if (MACHINE_HAS_DIAG9C) {
-               diag_stat_inc_norecursion(DIAG_STAT_X09C);
-               asm volatile("diag %0,0,0x9c"
-                            : : "d" (pcpu_devices[cpu].address));
-       } else if (MACHINE_HAS_DIAG44 && !smp_cpu_mtid) {
-               diag_stat_inc_norecursion(DIAG_STAT_X044);
-               asm volatile("diag 0,0,0x44");
-       }
+       if (!MACHINE_HAS_DIAG9C)
+               return;
+       diag_stat_inc_norecursion(DIAG_STAT_X09C);
+       asm volatile("diag %0,0,0x9c"
+                    : : "d" (pcpu_devices[cpu].address));
 }
 
 /*
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index ce1e4bb..9b2dab5 100644
@@ -242,7 +242,6 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
 
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
-       /* Use classic spinlocks + niai if the steal time is >= 10% */
        if (test_cpu_flag(CIF_DEDICATED_CPU))
                arch_spin_lock_queued(lp);
        else
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index bda7ac0..32b7a30 100644
@@ -238,7 +238,7 @@ static int test_unwind_irq(struct unwindme *u)
 {
        preempt_disable();
        if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) {
-               pr_info("Couldn't reqister external interrupt handler");
+               pr_info("Couldn't register external interrupt handler");
                return -1;
        }
        u->task = current;
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 460f255..0634561 100644
@@ -82,7 +82,8 @@ static pte_t * __init kasan_early_pte_alloc(void)
 enum populate_mode {
        POPULATE_ONE2ONE,
        POPULATE_MAP,
-       POPULATE_ZERO_SHADOW
+       POPULATE_ZERO_SHADOW,
+       POPULATE_SHALLOW
 };
 static void __init kasan_early_vmemmap_populate(unsigned long address,
                                                unsigned long end,
@@ -116,6 +117,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }
 
+               if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+                   mode == POPULATE_SHALLOW) {
+                       address = (address + P4D_SIZE) & P4D_MASK;
+                       continue;
+               }
+
                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
@@ -130,6 +137,12 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }
 
+               if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
+                   mode == POPULATE_SHALLOW) {
+                       address = (address + PUD_SIZE) & PUD_MASK;
+                       continue;
+               }
+
                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
@@ -195,6 +208,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
                                page = kasan_early_shadow_page;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
                                break;
+                       case POPULATE_SHALLOW:
+                               /* should never happen */
+                               break;
                        }
                }
                address += PAGE_SIZE;
@@ -313,22 +329,50 @@ void __init kasan_early_init(void)
        init_mm.pgd = early_pg_dir;
        /*
         * Current memory layout:
-        * +- 0 -------------+   +- shadow start -+
-        * | 1:1 ram mapping |  /| 1/8 ram        |
-        * +- end of ram ----+ / +----------------+
-        * | ... gap ...     |/  |      kasan     |
-        * +- shadow start --+   |      zero      |
-        * | 1/8 addr space  |   |      page      |
-        * +- shadow end    -+   |      mapping   |
-        * | ... gap ...     |\  |    (untracked) |
-        * +- modules vaddr -+ \ +----------------+
-        * | 2Gb             |  \|      unmapped  | allocated per module
-        * +-----------------+   +- shadow end ---+
+        * +- 0 -------------+     +- shadow start -+
+        * | 1:1 ram mapping |    /| 1/8 ram        |
+        * |                 |   / |                |
+        * +- end of ram ----+  /  +----------------+
+        * | ... gap ...     | /   |                |
+        * |                 |/    |    kasan       |
+        * +- shadow start --+     |    zero        |
+        * | 1/8 addr space  |     |    page        |
+        * +- shadow end    -+     |    mapping     |
+        * | ... gap ...     |\    |  (untracked)   |
+        * +- vmalloc area  -+ \   |                |
+        * | vmalloc_size    |  \  |                |
+        * +- modules vaddr -+   \ +----------------+
+        * | 2Gb             |    \|      unmapped  | allocated per module
+        * +-----------------+     +- shadow end ---+
+        *
+        * Current memory layout (KASAN_VMALLOC):
+        * +- 0 -------------+     +- shadow start -+
+        * | 1:1 ram mapping |    /| 1/8 ram        |
+        * |                 |   / |                |
+        * +- end of ram ----+  /  +----------------+
+        * | ... gap ...     | /   |    kasan       |
+        * |                 |/    |    zero        |
+        * +- shadow start --+     |    page        |
+        * | 1/8 addr space  |     |    mapping     |
+        * +- shadow end    -+     |  (untracked)   |
+        * | ... gap ...     |\    |                |
+        * +- vmalloc area  -+ \   +- vmalloc area -+
+        * | vmalloc_size    |  \  |shallow populate|
+        * +- modules vaddr -+   \ +- modules area -+
+        * | 2Gb             |    \|shallow populate|
+        * +-----------------+     +- shadow end ---+
         */
        /* populate kasan shadow (for identity mapping and zero page mapping) */
        kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
        if (IS_ENABLED(CONFIG_MODULES))
                untracked_mem_end = vmax - MODULES_LEN;
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+               untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
+               /* shallowly populate kasan shadow for vmalloc and modules */
+               kasan_early_vmemmap_populate(__sha(untracked_mem_end),
+                                            __sha(vmax), POPULATE_SHALLOW);
+       }
+       /* populate kasan shadow for untracked memory */
        kasan_early_vmemmap_populate(__sha(max_physmem_end),
                                     __sha(untracked_mem_end),
                                     POPULATE_ZERO_SHADOW);
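
The diagrams above rely on the generic KASAN scheme in which one shadow byte
describes 8 bytes of address space, hence the "1/8 ram" and "1/8 addr space"
regions; with KASAN_VMALLOC the vmalloc and modules shadow is only populated
shallowly (top-level page tables) at boot and filled in later when vmalloc
mappings are created. A minimal sketch of the shadow translation, using an
illustrative shadow offset rather than the real s390 configuration:

	#include <stdio.h>

	#define KASAN_SHADOW_SCALE_SHIFT 3	/* 8 bytes per shadow byte */
	#define KASAN_SHADOW_OFFSET 0x0ul	/* placeholder, not the s390 value */

	static unsigned long mem_to_shadow(unsigned long addr)
	{
		return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
	}

	int main(void)
	{
		unsigned long start = 0x10000000ul;

		/* a 4 KiB page needs only 512 bytes of shadow */
		printf("shadow of [%#lx, %#lx): [%#lx, %#lx)\n",
		       start, start + 0x1000ul,
		       mem_to_shadow(start), mem_to_shadow(start + 0x1000ul));
		return 0;
	}
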