1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /**************************************************************************
4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
32 #include <drm/ttm/ttm_module.h>
33 #include <linux/atomic.h>
34 #include <linux/errno.h>
35 #include <linux/wait.h>
36 #include <linux/sched/signal.h>
38 #include "ttm_object.h"
40 #define TTM_WRITE_LOCK_PENDING (1 << 0)
41 #define TTM_VT_LOCK_PENDING (1 << 1)
42 #define TTM_SUSPEND_LOCK_PENDING (1 << 2)
43 #define TTM_VT_LOCK (1 << 3)
44 #define TTM_SUSPEND_LOCK (1 << 4)
46 void ttm_lock_init(struct ttm_lock *lock)
48 spin_lock_init(&lock->lock);
49 init_waitqueue_head(&lock->queue);
52 lock->kill_takers = false;
53 lock->signal = SIGKILL;
56 void ttm_read_unlock(struct ttm_lock *lock)
58 spin_lock(&lock->lock);
60 wake_up_all(&lock->queue);
61 spin_unlock(&lock->lock);
64 static bool __ttm_read_lock(struct ttm_lock *lock)
68 spin_lock(&lock->lock);
69 if (unlikely(lock->kill_takers)) {
70 send_sig(lock->signal, current, 0);
71 spin_unlock(&lock->lock);
74 if (lock->rw >= 0 && lock->flags == 0) {
78 spin_unlock(&lock->lock);
82 int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
87 ret = wait_event_interruptible(lock->queue,
88 __ttm_read_lock(lock));
90 wait_event(lock->queue, __ttm_read_lock(lock));
94 static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
100 spin_lock(&lock->lock);
101 if (unlikely(lock->kill_takers)) {
102 send_sig(lock->signal, current, 0);
103 spin_unlock(&lock->lock);
106 if (lock->rw >= 0 && lock->flags == 0) {
110 } else if (lock->flags == 0) {
113 spin_unlock(&lock->lock);
118 int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
124 ret = wait_event_interruptible
125 (lock->queue, __ttm_read_trylock(lock, &locked));
127 wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
129 if (unlikely(ret != 0)) {
134 return (locked) ? 0 : -EBUSY;
137 void ttm_write_unlock(struct ttm_lock *lock)
139 spin_lock(&lock->lock);
141 wake_up_all(&lock->queue);
142 spin_unlock(&lock->lock);
145 static bool __ttm_write_lock(struct ttm_lock *lock)
149 spin_lock(&lock->lock);
150 if (unlikely(lock->kill_takers)) {
151 send_sig(lock->signal, current, 0);
152 spin_unlock(&lock->lock);
155 if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
157 lock->flags &= ~TTM_WRITE_LOCK_PENDING;
160 lock->flags |= TTM_WRITE_LOCK_PENDING;
162 spin_unlock(&lock->lock);
166 int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
171 ret = wait_event_interruptible(lock->queue,
172 __ttm_write_lock(lock));
173 if (unlikely(ret != 0)) {
174 spin_lock(&lock->lock);
175 lock->flags &= ~TTM_WRITE_LOCK_PENDING;
176 wake_up_all(&lock->queue);
177 spin_unlock(&lock->lock);
180 wait_event(lock->queue, __ttm_write_lock(lock));
185 static int __ttm_vt_unlock(struct ttm_lock *lock)
189 spin_lock(&lock->lock);
190 if (unlikely(!(lock->flags & TTM_VT_LOCK)))
192 lock->flags &= ~TTM_VT_LOCK;
193 wake_up_all(&lock->queue);
194 spin_unlock(&lock->lock);
199 static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
201 struct ttm_base_object *base = *p_base;
202 struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
206 ret = __ttm_vt_unlock(lock);
210 static bool __ttm_vt_lock(struct ttm_lock *lock)
214 spin_lock(&lock->lock);
216 lock->flags &= ~TTM_VT_LOCK_PENDING;
217 lock->flags |= TTM_VT_LOCK;
220 lock->flags |= TTM_VT_LOCK_PENDING;
222 spin_unlock(&lock->lock);
226 int ttm_vt_lock(struct ttm_lock *lock,
228 struct ttm_object_file *tfile)
233 ret = wait_event_interruptible(lock->queue,
234 __ttm_vt_lock(lock));
235 if (unlikely(ret != 0)) {
236 spin_lock(&lock->lock);
237 lock->flags &= ~TTM_VT_LOCK_PENDING;
238 wake_up_all(&lock->queue);
239 spin_unlock(&lock->lock);
243 wait_event(lock->queue, __ttm_vt_lock(lock));
246 * Add a base-object, the destructor of which will
247 * make sure the lock is released if the client dies
251 ret = ttm_base_object_init(tfile, &lock->base, false,
252 ttm_lock_type, &ttm_vt_lock_remove, NULL);
254 (void)__ttm_vt_unlock(lock);
256 lock->vt_holder = tfile;
261 int ttm_vt_unlock(struct ttm_lock *lock)
263 return ttm_ref_object_base_unref(lock->vt_holder,
264 lock->base.handle, TTM_REF_USAGE);
267 void ttm_suspend_unlock(struct ttm_lock *lock)
269 spin_lock(&lock->lock);
270 lock->flags &= ~TTM_SUSPEND_LOCK;
271 wake_up_all(&lock->queue);
272 spin_unlock(&lock->lock);
275 static bool __ttm_suspend_lock(struct ttm_lock *lock)
279 spin_lock(&lock->lock);
281 lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
282 lock->flags |= TTM_SUSPEND_LOCK;
285 lock->flags |= TTM_SUSPEND_LOCK_PENDING;
287 spin_unlock(&lock->lock);
291 void ttm_suspend_lock(struct ttm_lock *lock)
293 wait_event(lock->queue, __ttm_suspend_lock(lock));