 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
 * container for a synchronization primitive which can be used by userspace
 * to explicitly synchronize GPU commands, can be shared between userspace
 * processes, and can be shared between different DRM drivers.
 * Their primary use-case is to implement Vulkan fences and semaphores.
 * The syncobj userspace API provides ioctls for several operations:
 *
 *  - Creation and destruction of syncobjs
 *  - Import and export of syncobjs to/from a syncobj file descriptor
 *  - Import and export of a syncobj's underlying fence to/from a sync file
 *  - Reset a syncobj (set its fence to NULL)
 *  - Signal a syncobj (set a trivially signaled fence)
 *  - Wait for a syncobj's fence to appear and be signaled
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
 * &dma_fence which may be NULL.
 * When a syncobj is first created, its pointer is either NULL or a pointer
 * to an already signaled fence depending on whether the
 * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
 * &DRM_IOCTL_SYNCOBJ_CREATE.
 * When GPU work which signals a syncobj is enqueued in a DRM driver,
 * the syncobj fence is replaced with a fence which will be signaled by the
 * completion of that work.
 * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
 * driver retrieves the syncobj's current fence at the time the work is
 * enqueued and waits on that fence before submitting the work to hardware.
 * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
 * All manipulation of the syncobj's fence happens in terms of the current
 * fence at the time the ioctl is called by userspace regardless of whether
 * that operation is an immediate host-side operation (signal or reset) or
 * an operation which is enqueued in some driver queue.
 * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
 * manipulate a syncobj from the host by resetting its pointer to NULL or
 * setting its pointer to a fence which is already signaled.
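 *
 * As a rough, hypothetical userspace sketch (not part of this file; drm_fd is
 * assumed to be an open DRM device file descriptor), creating a syncobj,
 * signaling it from the host and resetting it again could look like::
 *
 *     struct drm_syncobj_create create = { .flags = 0 };
 *     struct drm_syncobj_array array;
 *
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *
 *     array.handles = (__u64)(uintptr_t)&create.handle;
 *     array.count_handles = 1;
 *     array.pad = 0;
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_SIGNAL, &array);
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_RESET, &array);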
 *
 * Host-side wait on syncobjs
 * --------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
 * host-side wait on all of the syncobj fences simultaneously.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
 * all of the syncobj fences to be signaled before it returns.
 * Otherwise, it returns once at least one syncobj fence has been signaled
 * and the index of a signaled fence is written back to the client.
 *
 * Unlike the enqueued GPU work dependencies which fail if they see a NULL
 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
 * the host-side wait will first wait for the syncobj to receive a non-NULL
 * fence and then wait on that fence.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
 * Assuming the syncobj starts off with a NULL fence, this allows a client
 * to do a host wait in one thread (or process) which waits on GPU work
 * submitted in another thread (or process) without having to manually
 * synchronize between the two.
 * This requirement is inherited from the Vulkan fence API.
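 *
 * For illustration only (hypothetical userspace snippet; drm_fd, handle and
 * deadline_ns, an absolute timeout in nanoseconds, are assumed to exist), a
 * host-side wait on a single syncobj which also waits for a fence to be
 * submitted first could look like::
 *
 *     struct drm_syncobj_wait wait = {
 *             .handles = (__u64)(uintptr_t)&handle,
 *             .timeout_nsec = deadline_ns,
 *             .count_handles = 1,
 *             .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
 *                      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *     };
 *
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);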
 *
 * Import/export of syncobjs
 * -------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
 * provide two mechanisms for import/export of syncobjs.
 *
 * The first lets the client import or export an entire syncobj to a file
 * descriptor.
 * These fds are opaque and have no use other than passing the syncobj
 * between processes.
 * All exported file descriptors and any syncobj handles created as a
 * result of importing those file descriptors own a reference to the
 * same underlying struct &drm_syncobj and the syncobj can be used
 * persistently across all the processes with which it is shared.
 * The syncobj is freed only once the last reference is dropped.
 * Unlike dma-buf, importing a syncobj creates a new handle (with its own
 * reference) for every import instead of de-duplicating.
 * The primary use-case of this persistent import/export is for shared
 * Vulkan fences and semaphores.
 *
 * The second import/export mechanism, which is indicated by
 * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
 * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, lets the client
 * import/export the syncobj's current fence from/to a &sync_file.
 * When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
 * operations on the syncobj will not affect the exported sync file.
 * When a sync file is imported into a syncobj, the syncobj's fence is set
 * to the fence wrapped by that sync file.
 * Because sync files are immutable, resetting or signaling the syncobj
 * will not affect any sync files whose fences have been imported into the
 * syncobj.
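 *
 * As a hedged sketch of that second mechanism (hypothetical userspace
 * snippet; drm_fd and handle are assumed to exist), exporting a syncobj's
 * current fence as a sync file could look like::
 *
 *     struct drm_syncobj_handle args = {
 *             .handle = handle,
 *             .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *     };
 *
 *     ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *
 * after which args.fd refers to a sync file wrapping the fence that was
 * current at export time.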
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "drm_internal.h"
struct syncobj_wait_entry {
        struct list_head node;
        struct task_struct *task;
        struct dma_fence *fence;
        struct dma_fence_cb fence_cb;
        u64    point;
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct syncobj_wait_entry *wait);
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);

        /* Check if we currently have a reference on the object */
        syncobj = idr_find(&file_private->syncobj_idr, handle);
        if (syncobj)
                drm_syncobj_get(syncobj);

        spin_unlock(&file_private->syncobj_table_lock);
EXPORT_SYMBOL(drm_syncobj_find);
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
                                       struct syncobj_wait_entry *wait)
        struct dma_fence *fence;

        spin_lock(&syncobj->lock);
        /* We've already tried once to get a fence and failed.  Now that we
         * have the lock, try one more time just to be sure we don't add a
         * callback when a fence has already been set.
         */
        fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
        if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
                dma_fence_put(fence);
                list_add_tail(&wait->node, &syncobj->cb_list);
        } else if (!fence) {
                wait->fence = dma_fence_get_stub();
        } else {
                wait->fence = fence;
        }
        spin_unlock(&syncobj->lock);

static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
                                    struct syncobj_wait_entry *wait)
        if (!wait->node.next)
                return;

        spin_lock(&syncobj->lock);
        list_del_init(&wait->node);
        spin_unlock(&syncobj->lock);
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add the timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as a new timeline point to the syncobj.
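 *
 * As a rough, driver-side sketch only (timeline_syncobj, job_fence and seqno
 * are illustrative names, not part of this file; the chain node is allocated
 * with kzalloc() as is done elsewhere in this file)::
 *
 *     struct dma_fence_chain *chain;
 *
 *     chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *     if (chain)
 *             drm_syncobj_add_point(timeline_syncobj, chain, job_fence, seqno);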
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
                           struct dma_fence_chain *chain,
                           struct dma_fence *fence,
        struct syncobj_wait_entry *cur, *tmp;
        struct dma_fence *prev;

        dma_fence_get(fence);

        spin_lock(&syncobj->lock);

        prev = drm_syncobj_fence_get(syncobj);
        /* You are adding an out-of-order point to the timeline, which could
         * cause the payload returned from the query ioctl to be 0!
         */
        if (prev && prev->seqno >= point)
                DRM_ERROR("You are adding an out-of-order point to the timeline!\n");
        dma_fence_chain_init(chain, prev, fence, point);
        rcu_assign_pointer(syncobj->fence, &chain->base);

        list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
                syncobj_wait_syncobj_func(syncobj, cur);
        spin_unlock(&syncobj->lock);

        /* Walk the chain once to trigger garbage collection */
        dma_fence_chain_for_each(fence, prev);
EXPORT_SYMBOL(drm_syncobj_add_point);
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object
 *
 * This replaces the fence on a sync object.
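 *
 * Passing a NULL @fence drops the current fence; this is how the reset ioctl
 * and drm_syncobj_free() clear a syncobj::
 *
 *     drm_syncobj_replace_fence(syncobj, NULL);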
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
                               struct dma_fence *fence)
        struct dma_fence *old_fence;
        struct syncobj_wait_entry *cur, *tmp;

        dma_fence_get(fence);

        spin_lock(&syncobj->lock);

        old_fence = rcu_dereference_protected(syncobj->fence,
                                              lockdep_is_held(&syncobj->lock));
        rcu_assign_pointer(syncobj->fence, fence);

        if (fence != old_fence) {
                list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
                        syncobj_wait_syncobj_func(syncobj, cur);
        }

        spin_unlock(&syncobj->lock);

        dma_fence_put(old_fence);
EXPORT_SYMBOL(drm_syncobj_replace_fence);
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
        struct dma_fence *fence = dma_fence_get_stub();

        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
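 *
 * As a hedged, hypothetical driver-side example (file_priv, handle, job and
 * the add_dependency() helper are illustrative only, not part of this file;
 * point 0 gives the binary syncobj behaviour), this could be used at submit
 * time like so::
 *
 *     struct dma_fence *fence;
 *     int ret;
 *
 *     ret = drm_syncobj_find_fence(file_priv, handle, 0, 0, &fence);
 *     if (ret)
 *             return ret;
 *     add_dependency(job, fence);  // hypothetical helper, not a DRM API
 *     dma_fence_put(fence);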
int drm_syncobj_find_fence(struct drm_file *file_private,
                           u32 handle, u64 point, u64 flags,
                           struct dma_fence **fence)
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        struct syncobj_wait_entry wait;
        u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);

        *fence = drm_syncobj_fence_get(syncobj);
        drm_syncobj_put(syncobj);

                ret = dma_fence_chain_find_seqno(fence, point);

                dma_fence_put(*fence);

        if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
                return ret;

        memset(&wait, 0, sizeof(wait));

        drm_syncobj_fence_add_wait(syncobj, &wait);

                set_current_state(TASK_INTERRUPTIBLE);

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                timeout = schedule_timeout(timeout);

        __set_current_state(TASK_RUNNING);

        if (wait.node.next)
                drm_syncobj_remove_wait(syncobj, &wait);
EXPORT_SYMBOL(drm_syncobj_find_fence);
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
void drm_syncobj_free(struct kref *kref)
        struct drm_syncobj *syncobj = container_of(kref,
                                                   struct drm_syncobj,
                                                   refcount);
        drm_syncobj_replace_fence(syncobj, NULL);
EXPORT_SYMBOL(drm_syncobj_free);
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
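 *
 * For instance, a hypothetical driver (file_private and a u32 handle are
 * assumed to exist) could create an already signaled syncobj and expose it
 * as a handle roughly like this::
 *
 *     struct drm_syncobj *syncobj;
 *     int ret;
 *
 *     ret = drm_syncobj_create(&syncobj, DRM_SYNCOBJ_CREATE_SIGNALED, NULL);
 *     if (ret)
 *             return ret;
 *
 *     ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
 *     drm_syncobj_put(syncobj);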
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
                       struct dma_fence *fence)
        struct drm_syncobj *syncobj;

        syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);

        kref_init(&syncobj->refcount);
        INIT_LIST_HEAD(&syncobj->cb_list);
        spin_lock_init(&syncobj->lock);

        if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
                drm_syncobj_assign_null_handle(syncobj);

        if (fence)
                drm_syncobj_replace_fence(syncobj, fence);

        *out_syncobj = syncobj;
EXPORT_SYMBOL(drm_syncobj_create);
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
int drm_syncobj_get_handle(struct drm_file *file_private,
                           struct drm_syncobj *syncobj, u32 *handle)
        /* take a reference to put in the idr */
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);

        if (ret < 0) {
                drm_syncobj_put(syncobj);
                return ret;
        }
EXPORT_SYMBOL(drm_syncobj_get_handle);
static int drm_syncobj_create_as_handle(struct drm_file *file_private,
                                        u32 *handle, uint32_t flags)
        struct drm_syncobj *syncobj;

        ret = drm_syncobj_create(&syncobj, flags, NULL);

        ret = drm_syncobj_get_handle(file_private, syncobj, handle);
        drm_syncobj_put(syncobj);

static int drm_syncobj_destroy(struct drm_file *file_private,
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);
        syncobj = idr_remove(&file_private->syncobj_idr, handle);
        spin_unlock(&file_private->syncobj_table_lock);

        drm_syncobj_put(syncobj);
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
        struct drm_syncobj *syncobj = file->private_data;

        if (syncobj)
                drm_syncobj_put(syncobj);

static const struct file_operations drm_syncobj_file_fops = {
        .release = drm_syncobj_file_release,
};

 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
        fd = get_unused_fd_flags(O_CLOEXEC);

        file = anon_inode_getfile("syncobj_file",
                                  &drm_syncobj_file_fops,
                                  syncobj, 0);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                return PTR_ERR(file);
        }

        drm_syncobj_get(syncobj);
        fd_install(fd, file);
EXPORT_SYMBOL(drm_syncobj_get_fd);
static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
                                    u32 handle, int *p_fd)
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);

        ret = drm_syncobj_get_fd(syncobj, p_fd);
        drm_syncobj_put(syncobj);

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
        struct drm_syncobj *syncobj;
        struct fd f = fdget(fd);

        if (f.file->f_op != &drm_syncobj_file_fops) {
                fdput(f);
                return -EINVAL;
        }

        /* take a reference to put in the idr */
        syncobj = f.file->private_data;
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);

        if (ret > 0) {
                *handle = ret;
                ret = 0;
        } else
                drm_syncobj_put(syncobj);
static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
        struct dma_fence *fence = sync_file_get_fence(fd);
        struct drm_syncobj *syncobj;

        syncobj = drm_syncobj_find(file_private, handle);
        if (!syncobj) {
                dma_fence_put(fence);
                return -ENOENT;
        }

        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
        drm_syncobj_put(syncobj);

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
                                        int handle, int *p_fd)
        struct dma_fence *fence;
        struct sync_file *sync_file;
        int fd = get_unused_fd_flags(O_CLOEXEC);

        ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);

        sync_file = sync_file_create(fence);

        dma_fence_put(fence);

        fd_install(fd, sync_file->file);
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
drm_syncobj_open(struct drm_file *file_private)
        idr_init_base(&file_private->syncobj_idr, 1);
        spin_lock_init(&file_private->syncobj_table_lock);
drm_syncobj_release_handle(int id, void *ptr, void *data)
        struct drm_syncobj *syncobj = ptr;

        drm_syncobj_put(syncobj);
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects held by this filp.
drm_syncobj_release(struct drm_file *file_private)
        idr_for_each(&file_private->syncobj_idr,
                     &drm_syncobj_release_handle, file_private);
        idr_destroy(&file_private->syncobj_idr);
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
        struct drm_syncobj_create *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        /* only DRM_SYNCOBJ_CREATE_SIGNALED is a valid flag for now */
        if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
                return -EINVAL;

        return drm_syncobj_create_as_handle(file_private,
                                            &args->handle, args->flags);
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_private)
        struct drm_syncobj_destroy *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        /* make sure padding is empty */
        if (args->pad)
                return -EINVAL;
        return drm_syncobj_destroy(file_private, args->handle);

drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_private)
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return drm_syncobj_export_sync_file(file_private, args->handle,
                                                    &args->fd);

        return drm_syncobj_handle_to_fd(file_private, args->handle,
                                        &args->fd);

drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_private)
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return drm_syncobj_import_sync_file_fence(file_private,
                                                          args->fd,
                                                          args->handle);
        return drm_syncobj_fd_to_handle(file_private, args->fd,
                                        &args->handle);
static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
                                            struct drm_syncobj_transfer *args)
        struct drm_syncobj *timeline_syncobj = NULL;
        struct dma_fence *fence;
        struct dma_fence_chain *chain;

        timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
        if (!timeline_syncobj) {
                return -ENOENT;
        }
        ret = drm_syncobj_find_fence(file_private, args->src_handle,
                                     args->src_point, args->flags,
                                     &fence);
        chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
        drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);

        dma_fence_put(fence);

        drm_syncobj_put(timeline_syncobj);

drm_syncobj_transfer_to_binary(struct drm_file *file_private,
                               struct drm_syncobj_transfer *args)
        struct drm_syncobj *binary_syncobj = NULL;
        struct dma_fence *fence;

        binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
        ret = drm_syncobj_find_fence(file_private, args->src_handle,
                                     args->src_point, args->flags, &fence);
        drm_syncobj_replace_fence(binary_syncobj, fence);
        dma_fence_put(fence);

        drm_syncobj_put(binary_syncobj);

drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_private)
        struct drm_syncobj_transfer *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->dst_point)
                ret = drm_syncobj_transfer_to_timeline(file_private, args);
        else
                ret = drm_syncobj_transfer_to_binary(file_private, args);
static void syncobj_wait_fence_func(struct dma_fence *fence,
                                    struct dma_fence_cb *cb)
        struct syncobj_wait_entry *wait =
                container_of(cb, struct syncobj_wait_entry, fence_cb);

        wake_up_process(wait->task);

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct syncobj_wait_entry *wait)
        struct dma_fence *fence;

        /* This happens inside the syncobj lock */
        fence = rcu_dereference_protected(syncobj->fence,
                                          lockdep_is_held(&syncobj->lock));
        dma_fence_get(fence);
        if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
                dma_fence_put(fence);
                return;
        } else if (!fence) {
                wait->fence = dma_fence_get_stub();
        } else {
                wait->fence = fence;
        }

        wake_up_process(wait->task);
        list_del_init(&wait->node);
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
                                                  void __user *user_points,
        struct syncobj_wait_entry *entries;
        struct dma_fence *fence;
        uint32_t signaled_count, i;

        points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);

        if (!user_points) {
                memset(points, 0, count * sizeof(uint64_t));

        } else if (copy_from_user(points, user_points,
                                  sizeof(uint64_t) * count)) {
                timeout = -EFAULT;
                goto err_free_points;
        }

        entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
        if (!entries) {
                timeout = -ENOMEM;
                goto err_free_points;
        }

        /* Walk the list of sync objects and initialize entries.  We do
         * this up-front so that we can properly return -EINVAL if there is
         * a syncobj with a missing fence and then never have the chance of
         * returning -EINVAL again.
         */
        signaled_count = 0;
        for (i = 0; i < count; ++i) {
                struct dma_fence *fence;

                entries[i].task = current;
                entries[i].point = points[i];
                fence = drm_syncobj_fence_get(syncobjs[i]);
                if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
                        dma_fence_put(fence);
                        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                                continue;
                        } else {
                                timeout = -EINVAL;
                                goto cleanup_entries;
                        }
                }

                if (fence)
                        entries[i].fence = fence;
                else
                        entries[i].fence = dma_fence_get_stub();

                if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
                    dma_fence_is_signaled(entries[i].fence)) {
                        if (signaled_count == 0 && idx)
                                *idx = i;
                        signaled_count++;
                }
        }

        if (signaled_count == count ||
            (signaled_count > 0 &&
             !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
                goto cleanup_entries;
        /* There's a very annoying laxness in the dma_fence API here, in
         * that backends are not required to automatically report when a
         * fence is signaled prior to fence->ops->enable_signaling() being
         * called.  So here if we fail to match signaled_count, we need to
         * fall through and try a 0 timeout wait!
         */
        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                for (i = 0; i < count; ++i)
                        drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
        }

        do {
                set_current_state(TASK_INTERRUPTIBLE);
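                /*
                 * Re-check every fence: count the ones that have already
                 * signaled and install a wake-up callback (via
                 * syncobj_wait_fence_func()) on those that have not, so the
                 * schedule_timeout() below is woken when they signal.
                 */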
                signaled_count = 0;
                for (i = 0; i < count; ++i) {
                        fence = entries[i].fence;

                        if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
                            dma_fence_is_signaled(fence) ||
                            (!entries[i].fence_cb.func &&
                             dma_fence_add_callback(fence,
                                                    &entries[i].fence_cb,
                                                    syncobj_wait_fence_func))) {
                                /* The fence has been signaled */
                                if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
                                        signaled_count++;
                                } else {
                                        if (idx)
                                                *idx = i;
                                        goto done_waiting;
                                }
                        }
                }

                if (signaled_count == count)
                        goto done_waiting;

                if (signal_pending(current)) {
                        timeout = -ERESTARTSYS;
                        goto done_waiting;
                }

                timeout = schedule_timeout(timeout);
        } while (1);

done_waiting:
        __set_current_state(TASK_RUNNING);

cleanup_entries:
        for (i = 0; i < count; ++i) {
                drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
                if (entries[i].fence_cb.func)
                        dma_fence_remove_callback(entries[i].fence,
                                                  &entries[i].fence_cb);
                dma_fence_put(entries[i].fence);
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: timeout nsec component in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
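 *
 * For example, an absolute deadline one second in the future maps to roughly
 * HZ + 1 jiffies, while a timeout_nsec of 0 or a deadline that already lies
 * in the past degenerates to a 0 jiffies poll.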
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
        ktime_t abs_timeout, now;
        u64 timeout_ns, timeout_jiffies64;

        /* a 0 timeout means poll - absolute 0 doesn't seem valid */
        if (timeout_nsec == 0)
                return 0;

        abs_timeout = ns_to_ktime(timeout_nsec);
        now = ktime_get();

        if (!ktime_after(abs_timeout, now))
                return 0;

        timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

        timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
        /* clamp timeout to avoid infinite timeout */
        if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies64 + 1;
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
static int drm_syncobj_array_wait(struct drm_device *dev,
                                  struct drm_file *file_private,
                                  struct drm_syncobj_wait *wait,
                                  struct drm_syncobj_timeline_wait *timeline_wait,
                                  struct drm_syncobj **syncobjs, bool timeline)
        signed long timeout = 0;
        uint32_t first = ~0;

        if (!timeline) {
                timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
                timeout = drm_syncobj_array_wait_timeout(syncobjs,
                                                         NULL,
                                                         wait->count_handles,
                                                         wait->flags,
                                                         timeout, &first);
                wait->first_signaled = first;
        } else {
                timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
                timeout = drm_syncobj_array_wait_timeout(syncobjs,
                                                         u64_to_user_ptr(timeline_wait->points),
                                                         timeline_wait->count_handles,
                                                         timeline_wait->flags,
                                                         timeout, &first);
                timeline_wait->first_signaled = first;
        }
static int drm_syncobj_array_find(struct drm_file *file_private,
                                  void __user *user_handles,
                                  uint32_t count_handles,
                                  struct drm_syncobj ***syncobjs_out)
        uint32_t i, *handles;
        struct drm_syncobj **syncobjs;

        handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
        if (handles == NULL)
                return -ENOMEM;

        if (copy_from_user(handles, user_handles,
                           sizeof(uint32_t) * count_handles)) {
                ret = -EFAULT;
                goto err_free_handles;
        }

        syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
        if (syncobjs == NULL) {
                ret = -ENOMEM;
                goto err_free_handles;
        }

        for (i = 0; i < count_handles; i++) {
                syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
                if (!syncobjs[i]) {
                        ret = -ENOENT;
                        goto err_put_syncobjs;
                }
        }

        kfree(handles);
        *syncobjs_out = syncobjs;
        return 0;

err_put_syncobjs:
        while (i-- > 0)
                drm_syncobj_put(syncobjs[i]);

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
        for (i = 0; i < count; i++)
                drm_syncobj_put(syncobjs[i]);
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_private)
        struct drm_syncobj_wait *args = data;
        struct drm_syncobj **syncobjs;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);

        ret = drm_syncobj_array_wait(dev, file_private,
                                     args, NULL, syncobjs, false);

        drm_syncobj_array_free(syncobjs, args->count_handles);

drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_private)
        struct drm_syncobj_timeline_wait *args = data;
        struct drm_syncobj **syncobjs;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);

        ret = drm_syncobj_array_wait(dev, file_private,
                                     NULL, args, syncobjs, true);

        drm_syncobj_array_free(syncobjs, args->count_handles);
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_private)
        struct drm_syncobj_array *args = data;
        struct drm_syncobj **syncobjs;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);

        for (i = 0; i < args->count_handles; i++)
                drm_syncobj_replace_fence(syncobjs[i], NULL);

        drm_syncobj_array_free(syncobjs, args->count_handles);

drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
        struct drm_syncobj_array *args = data;
        struct drm_syncobj **syncobjs;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);

        for (i = 0; i < args->count_handles; i++)
                drm_syncobj_assign_null_handle(syncobjs[i]);

        drm_syncobj_array_free(syncobjs, args->count_handles);
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_private)
        struct drm_syncobj_timeline_array *args = data;
        struct drm_syncobj **syncobjs;
        struct dma_fence_chain **chains;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);

        points = kmalloc_array(args->count_handles, sizeof(*points),
                               GFP_KERNEL);

        if (!u64_to_user_ptr(args->points)) {
                memset(points, 0, args->count_handles * sizeof(uint64_t));
        } else if (copy_from_user(points, u64_to_user_ptr(args->points),
                                  sizeof(uint64_t) * args->count_handles)) {
                ret = -EFAULT;
        }

        chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);

        for (i = 0; i < args->count_handles; i++) {
                chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
                for (j = 0; j < i; j++)
                        kfree(chains[j]);
        }

        for (i = 0; i < args->count_handles; i++) {
                struct dma_fence *fence = dma_fence_get_stub();

                drm_syncobj_add_point(syncobjs[i], chains[i],
                                      fence, points[i]);
                dma_fence_put(fence);
        }

        drm_syncobj_array_free(syncobjs, args->count_handles);
int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_private)
        struct drm_syncobj_timeline_array *args = data;
        struct drm_syncobj **syncobjs;
        uint64_t __user *points = u64_to_user_ptr(args->points);

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);

        for (i = 0; i < args->count_handles; i++) {
                struct dma_fence_chain *chain;
                struct dma_fence *fence;

                fence = drm_syncobj_fence_get(syncobjs[i]);
                chain = to_dma_fence_chain(fence);
                if (chain) {
                        struct dma_fence *iter, *last_signaled = NULL;
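                        /*
                         * Walk the chain towards older points, remembering
                         * the last node visited; the payload reported back
                         * to userspace is that node's seqno if it has
                         * signaled, otherwise its prev_seqno.
                         */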
                        dma_fence_chain_for_each(iter, fence) {
                                if (iter->context != fence->context) {
                                        dma_fence_put(iter);
                                        /* It is most likely that the timeline
                                         * has out-of-order points. */
                                        break;
                                }
                                dma_fence_put(last_signaled);
                                last_signaled = dma_fence_get(iter);
                        }
                        point = dma_fence_is_signaled(last_signaled) ?
                                last_signaled->seqno :
                                to_dma_fence_chain(last_signaled)->prev_seqno;
                        dma_fence_put(last_signaled);

                ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
                ret = ret ? -EFAULT : 0;

        drm_syncobj_array_free(syncobjs, args->count_handles);