locking/ww_mutex: Implement rtmutex based ww_mutex API functions
author Peter Zijlstra <peterz@infradead.org>
Sun, 15 Aug 2021 21:29:00 +0000 (23:29 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 17 Aug 2021 17:05:26 +0000 (19:05 +0200)
Add the actual ww_mutex API functions which replace the mutex-based variant
on RT-enabled kernels.
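
For context, callers reach these functions through the generic ww_mutex
interface declared in <linux/ww_mutex.h>. The sketch below is illustrative
only (demo_ww_class, demo_lock and demo_single_lock are made-up names, and
the lock is assumed to have been set up elsewhere with ww_mutex_init()); it
is not part of this patch:

  #include <linux/ww_mutex.h>

  static DEFINE_WW_CLASS(demo_ww_class);  /* illustrative lock class */
  static struct ww_mutex demo_lock;       /* ww_mutex_init(&demo_lock, &demo_ww_class) done elsewhere */

  static int demo_single_lock(void)
  {
          struct ww_acquire_ctx ctx;
          int ret;

          ww_acquire_init(&ctx, &demo_ww_class);
          ret = ww_mutex_lock(&demo_lock, &ctx);  /* entry point implemented below */
          if (!ret) {
                  /* ... critical section ... */
                  ww_mutex_unlock(&demo_lock);
          }
          ww_acquire_fini(&ctx);
          return ret;
  }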

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211305.024057938@linutronix.de
kernel/locking/Makefile
kernel/locking/ww_rt_mutex.c [new file with mode: 0644]

diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 683f0b7..d51cabf 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex_api.o
-obj-$(CONFIG_PREEMPT_RT) += spinlock_rt.o
+obj-$(CONFIG_PREEMPT_RT) += spinlock_rt.o ww_rt_mutex.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
diff --git a/kernel/locking/ww_rt_mutex.c b/kernel/locking/ww_rt_mutex.c
new file mode 100644
index 0000000..3f1fff7
--- /dev/null
+++ b/kernel/locking/ww_rt_mutex.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * rtmutex API
+ */
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#define RT_MUTEX_BUILD_MUTEX
+#define WW_RT
+#include "rtmutex.c"
+
+static int __sched
+__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
+                  unsigned int state, unsigned long ip)
+{
+       struct lockdep_map __maybe_unused *nest_lock = NULL;
+       struct rt_mutex *rtm = &lock->base;
+       int ret;
+
+       might_sleep();
+
+       if (ww_ctx) {
+               if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
+                       return -EALREADY;
+
+               /*
+                * Reset the wounded flag after a kill. No other process can
+                * race and wound us here, since they can't have a valid owner
+                * pointer if we don't have any locks held.
+                */
+               if (ww_ctx->acquired == 0)
+                       ww_ctx->wounded = 0;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+               nest_lock = &ww_ctx->dep_map;
+#endif
+       }
+       mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);
+
+       if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
+               if (ww_ctx)
+                       ww_mutex_set_context_fastpath(lock, ww_ctx);
+               return 0;
+       }
+
+       ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);
+
+       if (ret)
+               mutex_release(&rtm->dep_map, ip);
+       return ret;
+}
+
+int __sched
+ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+       return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
+}
+EXPORT_SYMBOL(ww_mutex_lock);
+
+int __sched
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+       return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
+}
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);
+
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+       struct rt_mutex *rtm = &lock->base;
+
+       __ww_mutex_unlock(lock);
+
+       mutex_release(&rtm->dep_map, _RET_IP_);
+       __rt_mutex_unlock(&rtm->rtmutex);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
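
The -EALREADY and -EDEADLK return values handled above are what the usual
wait/wound back-off pattern relies on. Below is a minimal sketch of that
pattern for two locks, using only existing ww_mutex API calls
(ww_mutex_lock_slow() is the documented "sleep until the contended lock is
free" helper); the demo_lock_pair() name and the two-lock scenario are
illustrative and not part of this patch:

  #include <linux/ww_mutex.h>

  static int demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                            struct ww_class *class)
  {
          struct ww_acquire_ctx ctx;
          struct ww_mutex *held = a, *want = b;
          int ret;

          ww_acquire_init(&ctx, class);

          ret = ww_mutex_lock(held, &ctx);  /* with nothing held yet, this simply waits */
          if (ret)
                  goto out_fini;

          while ((ret = ww_mutex_lock(want, &ctx)) == -EDEADLK) {
                  /*
                   * Lost the wait/wound arbitration: drop what we hold,
                   * sleep on the contended lock, then retry the other one.
                   */
                  struct ww_mutex *tmp = held;

                  ww_mutex_unlock(held);
                  ww_mutex_lock_slow(want, &ctx);
                  held = want;
                  want = tmp;
          }
          if (ret) {
                  ww_mutex_unlock(held);
                  goto out_fini;
          }

          ww_acquire_done(&ctx);
          /* ... both a and b are held ... */
          ww_mutex_unlock(a);
          ww_mutex_unlock(b);
          ret = 0;
  out_fini:
          ww_acquire_fini(&ctx);
          return ret;
  }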