// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};
/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @event: A struct drm_pending_event that controls the event delivery.
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};
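
/*
 * fman_from_fence - Return the fence manager a fence belongs to.
 *
 * The dma_fence lock is the fence manager's lock, so container_of() on
 * the lock pointer recovers the manager.
 */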
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
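
/*
 * vmw_fence_obj_destroy - dma_fence release callback.
 *
 * Unlinks the fence from the fence manager's list and hands it to the
 * type-specific destructor (kernel or user fence).
 */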
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}
static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}
struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}
static void __vmw_fences_update(struct vmw_fence_manager *fman);
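
/*
 * vmw_fence_wait - dma_fence_ops wait callback.
 *
 * Open-coded wait that registers a seqno waiter and repeatedly calls
 * __vmw_fences_update() between sleeps, so that fences get signaled even
 * when no interrupt is delivered for them.
 */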
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);
		ret = schedule_timeout(ret);
		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}
static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};
/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */
		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}
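
/*
 * vmw_fence_manager_init - Allocate and initialize a struct vmw_fence_manager.
 *
 * Returns the new fence manager, or NULL on allocation failure.
 */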
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}
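
/*
 * vmw_fence_obj_init - Common initialization for kernel and user fences.
 *
 * Initializes the embedded dma_fence and adds the fence to the manager's
 * fence list. Fails if the fifo is down.
 */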
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */
		list_add_tail(&action->head, &fman->cleanup_list);
	}
}
/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}
/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}
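
/*
 * __vmw_fences_update - Signal all fences whose seqno has passed.
 *
 * Must be called with the fence manager lock held. Also updates the device
 * fence goal seqno and schedules the cleanup worker if any actions were
 * queued for cleanup.
 */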
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */
	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}
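
/*
 * vmw_fence_create - Create a kernel-only fence object for the given seqno.
 */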
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}
static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}
static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}
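
/*
 * vmw_user_fence_create - Create a fence object with a user-space handle.
 *
 * In addition to the fence itself, this sets up the ttm base object that
 * makes the fence referenceable from user-space.
 */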
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */
	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}
/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fence
 * objects.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */
	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */
	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}
/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}
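
/*
 * vmw_fence_obj_wait_ioctl - Wait ioctl implementation.
 *
 * Waits for the fence identified by @arg->handle, using a kernel cookie
 * to bound the total wait time across restarted calls.
 */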
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		(wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */
	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}
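
/*
 * vmw_fence_obj_signaled_ioctl - Report fence signaled status to user-space,
 * along with the last seqno the device has signaled as passed.
 */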
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}
/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}
/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}
/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}
struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};
static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}