asm-generic/mmiowb: Add generic implementation of mmiowb() tracking
author	Will Deacon <will.deacon@arm.com>
Fri, 22 Feb 2019 12:48:44 +0000 (12:48 +0000)
committer	Will Deacon <will.deacon@arm.com>
Mon, 8 Apr 2019 10:59:39 +0000 (11:59 +0100)
In preparation for removing all explicit mmiowb() calls from driver
code, implement a tracking system in asm-generic, based loosely on the
PowerPC implementation. This allows architectures with a non-empty
mmiowb() definition to have the barrier automatically inserted in
spin_unlock() following a critical section containing an I/O write.
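
As an illustration (hypothetical driver code, not part of this patch), the
goal is to let the explicit barrier in sequences like the following be
dropped, because the unlock will issue it when needed:

    spin_lock(&dev->lock);
    writel(val, dev->regs + REG_CTRL);  /* I/O write inside critical section */
    mmiowb();                           /* explicit barrier, to be removed   */
    spin_unlock(&dev->lock);            /* will now issue mmiowb() itself    */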

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
include/asm-generic/mmiowb.h [new file with mode: 0644]
include/asm-generic/mmiowb_types.h [new file with mode: 0644]
kernel/Kconfig.locks
kernel/locking/spinlock.c

diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
new file mode 100644 (file)
index 0000000..9439ff0
--- /dev/null
+++ b/include/asm-generic/mmiowb.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_H
+#define __ASM_GENERIC_MMIOWB_H
+
+/*
+ * Generic implementation of mmiowb() tracking for spinlocks.
+ *
+ * If your architecture doesn't ensure that writes to an I/O peripheral
+ * within two spinlocked sections on two different CPUs are seen by the
+ * peripheral in the order corresponding to the lock handover, then you
+ * need to follow these FIVE easy steps:
+ *
+ *     1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
+ *        in asm/mmiowb.h, then #include this file
+ *     2. Ensure your I/O write accessors call mmiowb_set_pending()
+ *     3. Select ARCH_HAS_MMIOWB
+ *     4. Untangle the resulting mess of header files
+ *     5. Complain to your architects
+ */
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm-generic/mmiowb_types.h>
+
+#ifndef arch_mmiowb_state
+#include <asm/percpu.h>
+#include <asm/smp.h>
+
+DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+#define __mmiowb_state()       this_cpu_ptr(&__mmiowb_state)
+#else
+#define __mmiowb_state()       arch_mmiowb_state()
+#endif /* arch_mmiowb_state */
+
+static inline void mmiowb_set_pending(void)
+{
+       struct mmiowb_state *ms = __mmiowb_state();
+       ms->mmiowb_pending = ms->nesting_count;
+}
+
+static inline void mmiowb_spin_lock(void)
+{
+       struct mmiowb_state *ms = __mmiowb_state();
+       ms->nesting_count++;
+}
+
+static inline void mmiowb_spin_unlock(void)
+{
+       struct mmiowb_state *ms = __mmiowb_state();
+
+       if (unlikely(ms->mmiowb_pending)) {
+               ms->mmiowb_pending = 0;
+               mmiowb();
+       }
+
+       ms->nesting_count--;
+}
+#else
+#define mmiowb_set_pending()           do { } while (0)
+#define mmiowb_spin_lock()             do { } while (0)
+#define mmiowb_spin_unlock()           do { } while (0)
+#endif /* CONFIG_MMIOWB */
+#endif /* __ASM_GENERIC_MMIOWB_H */
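
To make steps 1-3 above concrete: a minimal sketch of what a port might add,
assuming a fictional architecture "foo" whose barrier instruction and accessor
are placeholders rather than code from any real port (per step 3, the arch
would also select ARCH_HAS_MMIOWB in its Kconfig):

    /* arch/foo/include/asm/mmiowb.h */
    #ifndef __ASM_MMIOWB_H
    #define __ASM_MMIOWB_H

    /* Step 1: arch barrier ordering MMIO writes before lock release. */
    #define mmiowb()    asm volatile("io-barrier" ::: "memory")

    #include <asm-generic/mmiowb.h>
    #endif

    /* Step 2: each I/O write accessor flags a pending MMIO write. */
    static inline void writel(u32 val, volatile void __iomem *addr)
    {
            __raw_writel(val, addr);
            mmiowb_set_pending();
    }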
diff --git a/include/asm-generic/mmiowb_types.h b/include/asm-generic/mmiowb_types.h
new file mode 100644 (file)
index 0000000..8eb0095
--- /dev/null
+++ b/include/asm-generic/mmiowb_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_TYPES_H
+#define __ASM_GENERIC_MMIOWB_TYPES_H
+
+#include <linux/types.h>
+
+struct mmiowb_state {
+       u16     nesting_count;
+       u16     mmiowb_pending;
+};
+
+#endif /* __ASM_GENERIC_MMIOWB_TYPES_H */
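
The two u16 fields above are the whole per-CPU state: nesting_count tracks
spinlock depth and mmiowb_pending records whether an I/O write has happened
under a lock. A short trace of the helpers from mmiowb.h (this assumes the
arch's writel() calls mmiowb_set_pending(), and that the spinlock code calls
the mmiowb_spin_*() hooks, which is wired up separately):

    spin_lock(&l);      /* mmiowb_spin_lock():   nesting_count 0 -> 1  */
    writel(x, reg);     /* mmiowb_set_pending(): mmiowb_pending = 1    */
    spin_unlock(&l);    /* mmiowb_spin_unlock(): pending is non-zero,
                           so mmiowb() is issued; nesting_count -> 0   */

    writel(y, reg);     /* no lock held: mmiowb_pending is set to
                           nesting_count, i.e. 0, so a later unlock of
                           an unrelated lock pays for no barrier       */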
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index fbba478..6ba2570 100644 (file)
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -251,3 +251,10 @@ config ARCH_USE_QUEUED_RWLOCKS
 config QUEUED_RWLOCKS
        def_bool y if ARCH_USE_QUEUED_RWLOCKS
        depends on SMP
+
+config ARCH_HAS_MMIOWB
+       bool
+
+config MMIOWB
+       def_bool y if ARCH_HAS_MMIOWB
+       depends on SMP
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 936f3d1..0ff0838 100644 (file)
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -22,6 +22,13 @@
 #include <linux/debug_locks.h>
 #include <linux/export.h>
 
+#ifdef CONFIG_MMIOWB
+#ifndef arch_mmiowb_state
+DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
+#endif
+#endif
+
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
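
The per-CPU __mmiowb_state defined here backs the __mmiowb_state() accessor
in asm-generic/mmiowb.h for architectures without arch_mmiowb_state(). The
calls into mmiowb_spin_lock()/mmiowb_spin_unlock() are hooked into the
spinlock implementation separately; roughly (a sketch of the intended shape,
not the exact hook-up in this series):

    static inline void do_raw_spin_lock(raw_spinlock_t *lock)
    {
            arch_spin_lock(&lock->raw_lock);
            mmiowb_spin_lock();     /* bump nesting_count */
    }

    static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
    {
            mmiowb_spin_unlock();   /* mmiowb() if a write is pending */
            arch_spin_unlock(&lock->raw_lock);
    }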