// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

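/*
 * Illustrative sketch (not a call site in this file): a caller walking a
 * lookaside structure under the resource lock can race with the final
 * kref_put() and must therefore use the _unless_doomed variant:
 *
 *	read_lock(&dev_priv->resource_lock);
 *	res = vmw_resource_reference_unless_doomed(res);
 *	read_unlock(&dev_priv->resource_lock);
 *	if (!res)
 *		return -EINVAL; (the resource is being destroyed)
 */
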
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

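/*
 * Basic reference pattern (hedged caller sketch; "res" is assumed to be
 * a valid resource pointer):
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *	(use tmp...)
 *	vmw_resource_unreference(&tmp); (tmp is set to NULL here)
 */
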
/**
 * vmw_resource_alloc_id - allocate a resource id for the resource.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res:      The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 *            the first validation.
 * @res_free: Resource destructor.
 * @func:     Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

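/*
 * Typical constructor flow (hedged sketch; my_res_free, my_res_funcs and
 * my_hw_destroy are hypothetical): initialize the base object, create the
 * hardware state, and only then activate so that lookups can find the
 * resource:
 *
 *	ret = vmw_resource_init(dev_priv, res, false, my_res_free,
 *				&my_res_funcs);
 *	if (unlikely(ret != 0))
 *		goto out_err;
 *	(emit device commands creating the hardware resource...)
 *	vmw_resource_activate(res, my_hw_destroy);
 */
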
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:  Pointer to a device private struct
 * @tfile:     Pointer to a struct ttm_object_file identifying the caller
 * @handle:    The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res:     On successful return the location pointed to will contain
 *             a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

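/*
 * Illustrative lookup pattern (hypothetical ioctl sketch; arg->handle is
 * an assumed user-space argument):
 *
 *	struct vmw_resource *res;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(operate on the surface resource...)
 *	vmw_resource_unreference(&res);
 */
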
/**
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:           The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 *                 performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

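/*
 * The size computation above rounds the backup size up to whole pages:
 * assuming PAGE_SIZE == 4096, a backup_size of 100 bytes yields a 4096
 * byte (one page) buffer, and a backup_size of 4097 bytes yields 8192.
 */
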
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res:     The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 *           containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @ticket:        The ww acquire context to use, or NULL if trylocking.
 * @res:           The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 *                 performed while interruptible.
 * @val_buf:       On successful return contains data about the
 *                 reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer.
 *
 * @ticket:  The ww acquire ctx used for reservation.
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @ticket:        The ww acquire ticket to use, or NULL if trylocking.
 * @res:           The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

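/*
 * Hedged sketch of the reserve / validate / unreserve lifecycle used at
 * command submission time (error handling elided; the backup buffer, if
 * any, must be reserved across validation):
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	(reserve res->backup, if any...)
 *	ret = vmw_resource_validate(res);
 *	(submit commands referencing res...)
 *	vmw_resource_unreserve(res, false, NULL, 0);
 */
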
/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct vmw_resource *res, *next;
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.shared = false
	};

	lockdep_assert_held(&vbo->base.resv->lock.base);
	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
		if (!res->func->unbind)
			continue;

		(void) res->func->unbind(res, true, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		list_del_init(&res->mob_head);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo:  The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type:     The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

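/*
 * Hedged usage sketch: pinning keeps a resource resident and its id
 * stable, e.g. while it is being scanned out ("res" is assumed to be a
 * valid, referenced resource):
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	(the resource cannot be evicted here...)
 *	vmw_resource_unpin(res);
 */
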
/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}