powerpc/pseries: Introduce rwlock to gatekeep DTLB usage
author: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Wed, 3 Jul 2019 17:03:57 +0000 (22:33 +0530)
committer: Michael Ellerman <mpe@ellerman.id.au>
Thu, 4 Jul 2019 12:23:38 +0000 (22:23 +1000)
Since we would be introducing a new user of the DTL buffer in a
subsequent patch, we need a way to gatekeep use of the DTL buffer.

The current debugfs interface for DTL allows registering and opening
CPU-specific DTL buffers. CPU-specific files are exposed under the
debugfs 'powerpc/dtl/' node, and changing 'dtl_event_mask' in the same
directory controls the event mask used when registering the DTL
buffer for a particular CPU.

Subsequently, we will be introducing a user of the DTL buffers that
registers access to the DTL buffers across all CPUs with the same event
mask. To ensure these two users do not step on each other, we introduce
a rwlock to gatekeep DTL buffer access. This fits the requirement of the
current debugfs interface wanting to allow multiple independent
cpu-specific users (read lock), and the subsequent user wanting
exclusive access (write lock).

Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/lppaca.h
arch/powerpc/platforms/pseries/dtl.c
arch/powerpc/platforms/pseries/lpar.c

index a8ac2b8..e45b729 100644 (file)
@@ -32,6 +32,7 @@
  */
 #include <linux/cache.h>
 #include <linux/threads.h>
+#include <linux/spinlock_types.h>
 #include <asm/types.h>
 #include <asm/mmu.h>
 #include <asm/firmware.h>
@@ -166,6 +167,7 @@ struct dtl_entry {
 #define DTL_LOG_ALL            (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
 
 extern struct kmem_cache *dtl_cache;
+extern rwlock_t dtl_access_lock;
 
 /*
  * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
index fb05804..ae4fb2a 100644 (file)
@@ -193,11 +193,16 @@ static int dtl_enable(struct dtl *dtl)
        if (dtl->buf)
                return -EBUSY;
 
+       /* ensure there are no other conflicting dtl users */
+       if (!read_trylock(&dtl_access_lock))
+               return -EBUSY;
+
        n_entries = dtl_buf_entries;
        buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
        if (!buf) {
                printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
                                __func__, dtl->cpu);
+               read_unlock(&dtl_access_lock);
                return -ENOMEM;
        }
 
@@ -214,8 +219,11 @@ static int dtl_enable(struct dtl *dtl)
        }
        spin_unlock(&dtl->lock);
 
-       if (rc)
+       if (rc) {
+               read_unlock(&dtl_access_lock);
                kmem_cache_free(dtl_cache, buf);
+       }
+
        return rc;
 }
 
@@ -227,6 +235,7 @@ static void dtl_disable(struct dtl *dtl)
        dtl->buf = NULL;
        dtl->buf_entries = 0;
        spin_unlock(&dtl->lock);
+       read_unlock(&dtl_access_lock);
 }
 
 /* file interface */
index da7e7c2..7869121 100644 (file)
@@ -113,6 +113,10 @@ void register_dtl_buffer(int cpu)
        }
 }
 
+#ifdef CONFIG_PPC_SPLPAR
+DEFINE_RWLOCK(dtl_access_lock);
+#endif /* CONFIG_PPC_SPLPAR */
+
 void vpa_init(int cpu)
 {
        int hwcpu = get_hard_smp_processor_id(cpu);