powerpc/pseries/svm: Use shared memory for LPPACA structures
author Anshuman Khandual <khandual@linux.vnet.ibm.com>
Tue, 20 Aug 2019 02:13:18 +0000 (23:13 -0300)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 29 Aug 2019 23:55:40 +0000 (09:55 +1000)
LPPACA structures need to be shared with the host, so they need to be in
shared memory. Instead of allocating an individual chunk of memblock memory
for each structure, a single contiguous chunk is allocated and converted
into shared memory. Subsequent allocation requests are then served from that
chunk, so all of the structures always end up in shared memory.
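
In outline, the scheme is the sketch below, a simplified version of the
alloc_shared_lppaca() path added by this patch (the helper name is
illustrative; NUMA placement, the bottom-up memblock handling and error
checking are omitted):

	static void *shared_chunk;
	static unsigned long shared_used;

	static void *__init alloc_from_shared_chunk(size_t size, size_t total)
	{
		if (!shared_chunk) {
			/* One contiguous, page-aligned allocation covers all LPPACAs. */
			shared_chunk = memblock_alloc(total, PAGE_SIZE);
			/* Convert the whole chunk to shared memory with a single ucall. */
			uv_share_page(PHYS_PFN(__pa(shared_chunk)), total >> PAGE_SHIFT);
		}

		/* Later requests are simple bump allocations out of the chunk. */
		shared_used += size;
		return shared_chunk + shared_used - size;
	}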

While we are able to use a kmem_cache constructor for the Debug Trace Log,
LPPACAs are allocated very early in the boot process (before SLUB is
available), so we need to use a simpler scheme here.
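
For comparison, the constructor-based scheme used for the DTL elsewhere in
this series looks roughly like the sketch below; the cache name, object
size, and the init_dtl_cache()/shared_page_ctor() helpers are illustrative,
not the exact code from that patch:

	static void shared_page_ctor(void *obj)
	{
		/* Objects are page sized, so share each object's backing page
		 * with the hypervisor as the slab constructs it. */
		uv_share_page(PHYS_PFN(__pa(obj)), 1);
	}

	static struct kmem_cache *dtl_cache;

	static int __init init_dtl_cache(void)
	{
		dtl_cache = kmem_cache_create("dtl", PAGE_SIZE, PAGE_SIZE, 0,
					      shared_page_ctor);
		return dtl_cache ? 0 : -ENOMEM;
	}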

Introduce the is_secure_guest() helper, which uses the MSR S bit to tell
whether we're running as a secure guest.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190820021326.6884-9-bauerman@linux.ibm.com
arch/powerpc/include/asm/svm.h [new file with mode: 0644]
arch/powerpc/kernel/paca.c

diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
new file mode 100644
index 0000000..2689d8d
--- /dev/null
+++ b/arch/powerpc/include/asm/svm.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SVM helper functions
+ *
+ * Copyright 2018 Anshuman Khandual, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_SVM_H
+#define _ASM_POWERPC_SVM_H
+
+#ifdef CONFIG_PPC_SVM
+
+static inline bool is_secure_guest(void)
+{
+       return mfmsr() & MSR_S;
+}
+
+#else /* CONFIG_PPC_SVM */
+
+static inline bool is_secure_guest(void)
+{
+       return false;
+}
+
+#endif /* CONFIG_PPC_SVM */
+#endif /* _ASM_POWERPC_SVM_H */
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 612fc87..949eceb 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -14,6 +14,8 @@
 #include <asm/sections.h>
 #include <asm/pgtable.h>
 #include <asm/kexec.h>
+#include <asm/svm.h>
+#include <asm/ultravisor.h>
 
 #include "setup.h"
 
@@ -54,6 +56,41 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align,
 
 #define LPPACA_SIZE 0x400
 
+static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
+                                       unsigned long limit, int cpu)
+{
+       size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
+       static unsigned long shared_lppaca_size;
+       static void *shared_lppaca;
+       void *ptr;
+
+       if (!shared_lppaca) {
+               memblock_set_bottom_up(true);
+
+               shared_lppaca =
+                       memblock_alloc_try_nid(shared_lppaca_total_size,
+                                              PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
+                                              limit, NUMA_NO_NODE);
+               if (!shared_lppaca)
+                       panic("cannot allocate shared data");
+
+               memblock_set_bottom_up(false);
+               uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
+                             shared_lppaca_total_size >> PAGE_SHIFT);
+       }
+
+       ptr = shared_lppaca + shared_lppaca_size;
+       shared_lppaca_size += size;
+
+       /*
+        * This is very early in boot, so no harm done if the kernel crashes at
+        * this point.
+        */
+       BUG_ON(shared_lppaca_size > shared_lppaca_total_size);
+
+       return ptr;
+}
+
 /*
  * See asm/lppaca.h for more detail.
  *
@@ -83,7 +120,11 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
        if (early_cpu_has_feature(CPU_FTR_HVMODE))
                return NULL;
 
-       lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
+       if (is_secure_guest())
+               lp = alloc_shared_lppaca(LPPACA_SIZE, 0x400, limit, cpu);
+       else
+               lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
+
        init_lppaca(lp);
 
        return lp;