// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"
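
/* Maximum number of consecutive eviction failures tolerated before giving up. */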
#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}
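
/**
 * vmw_resource_reference - Take a reference on a resource
 * @res: The resource to reference.
 *
 * Increments the resource's refcount and returns @res.
 */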
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
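
/**
 * vmw_resource_reference_unless_doomed - Take a reference on a resource,
 * unless its refcount has already dropped to zero
 * @res: The resource to reference.
 *
 * Return: @res on success, NULL if the resource is already on its way
 * to destruction.
 */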
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}
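
/*
 * vmw_resource_release - kref release callback for resources.
 *
 * Unbinds the resource from its backup buffer if attached, destroys the
 * hardware resource, kills any bindings, and finally frees the resource
 * and releases its device id.
 */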
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}
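
/**
 * vmw_resource_unreference - Drop a reference on a resource
 * @p_res: Pointer to the resource pointer to unreference. Cleared to NULL
 * on return.
 */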
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

/*
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.base.size < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/*
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->base.pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set number of pages allowed prefaulting and fence the buffer object
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(dma_resv_excl_fence(bo->base.resv));
	}

	return 0;
}