// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

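/*
 * A note on the convention behind VMW_FENCE_WRAP (a sketch, not new
 * driver logic): seqnos are 32-bit and wrap, so "has seqno S passed P?"
 * is answered with modular arithmetic rather than a plain compare.
 * Throughout this file the test
 *
 *	(u32)(passed - seqno) < VMW_FENCE_WRAP
 *
 * means "seqno lies at most half the 32-bit range behind passed", which
 * stays correct across wrap-around.
 */
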
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * the current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

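/*
 * A minimal sketch of the call pattern described above, assuming the
 * irq bottom half in vmwgfx_irq.c and the SVGA_IRQFLAG_* masks (the
 * exact handler code may differ from this sketch):
 *
 *	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
 *			     SVGA_IRQFLAG_FENCE_GOAL))
 *		vmw_fences_update(dev_priv->fman);
 */
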
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fence_read(dev_priv);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);
		ret = schedule_timeout(ret);
		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * Hence fman::lock is not held.
		 */
		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

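/*
 * A sketch of the expected pairing, assuming the usual driver
 * load/unload path (the actual call sites live elsewhere in the
 * driver):
 *
 *	dev_priv->fman = vmw_fence_manager_init(dev_priv);
 *	...
 *	vmw_fence_manager_takedown(dev_priv->fman);
 *
 * Note that the manager starts with fifo_down == true; fence creation
 * fails with -EBUSY until vmw_fence_fifo_up() is called.
 */
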
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */
		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fifo_mem_write(fman->dev_priv,
					   SVGA_FIFO_FENCE_GOAL,
					   fence->base.seqno);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
			   fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fence_read(fman->dev_priv);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */
	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fence_read(fman->dev_priv);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

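/*
 * A sketch of typical use, assuming a seqno freshly emitted to the
 * device fifo (the real call sites are in the fifo/execbuf code):
 *
 *	struct vmw_fence_obj *fence;
 *	int ret = vmw_fence_create(fman, seqno, &fence);
 *
 *	if (!ret) {
 *		... use fence ...
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */
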
static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);

	ttm_base_object_kfree(ufence, base);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	int ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);

	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences
 * sequentially.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */
	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}

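/*
 * A sketch of a typical caller, assuming a fence imported from a
 * sync_file fd (the real import path lives in the execbuf code; fences
 * from sync-file are exactly where a dma_fence_array can show up):
 *
 *	struct dma_fence *in = sync_file_get_fence(fd);
 *
 *	if (in) {
 *		ret = vmw_wait_dma_fence(fman, in);
 *		dma_fence_put(in);
 *	}
 */
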
/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */
	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);
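	/*
	 * The shifts compute wait_timeout * (2^-20 + 2^-24 - 2^-26)
	 * ~= wait_timeout * 0.9984e-6, i.e. within about 0.2% of an
	 * exact divide by 1000000.
	 */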
	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */
	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * the current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}