drm/vmwgfx: stop using dma_resv_excl_fence v2
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	dma_resv_assert_held(res->backup->base.base.resv);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	dma_resv_assert_held(backup->base.base.resv);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
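
/*
 * Usage sketch (illustrative, not part of the driver): the
 * _unless_doomed variant is for lookups racing with the final
 * kref_put(). It only returns the resource if the refcount was still
 * nonzero, so a release function that unlinks the resource from the
 * lookup structure (under the same lock, before freeing) cannot have
 * its teardown resurrected. my_table/my_table_lookup are hypothetical.
 *
 *	struct vmw_resource *my_find(struct my_table *tbl, u32 key)
 *	{
 *		struct vmw_resource *res;
 *
 *		spin_lock(&tbl->lock);
 *		res = my_table_lookup(tbl, key);	// may be mid-release
 *		if (res)
 *			res = vmw_resource_reference_unless_doomed(res);
 *		spin_unlock(&tbl->lock);
 *		return res;	// NULL if not found or being destroyed
 *	}
 */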

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ret = ttm_bo_reserve(bo, false, false, NULL);
		BUG_ON(ret);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
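
/*
 * Note on the idiom above (illustrative): idr_preload(GFP_KERNEL)
 * pre-fills a per-cpu allocation cache while sleeping is still allowed,
 * so the idr_alloc() issued under the spinlock can use GFP_NOWAIT and
 * still be unlikely to fail. A minimal generic sketch of the same
 * pattern (my_idr/my_lock are hypothetical):
 *
 *	idr_preload(GFP_KERNEL);	// may sleep
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);	// atomic context
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */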

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
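
/*
 * Usage sketch (illustrative): a successful lookup returns a refcounted
 * resource that the caller must balance with vmw_resource_unreference().
 * user_surface_converter is assumed to be in scope, as in the callers in
 * this driver.
 *
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret)
 *		return ret;
 *	// ... use res ...
 *	vmw_resource_unreference(&res);	// also clears the pointer
 */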

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it is
 * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}

/*
 * Helper function that looks up either a surface or a bo.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct drm_file *filp,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(filp, handle, out_buf);
	return ret;
}
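
/*
 * Usage sketch (illustrative): callers pass NULL-initialized out
 * pointers and dispatch on which one comes back non-NULL. On success
 * exactly one of them is set. use_surface/use_buffer are hypothetical.
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_buffer_object *buf = NULL;
 *	int ret;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, filp, handle, &surf, &buf);
 *	if (ret)
 *		return ret;
 *	if (surf)
 *		ret = use_surface(surf);	// handle named a surface
 *	else
 *		ret = use_buffer(buf);		// handle named a buffer object
 */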

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size = PFN_ALIGN(res->backup_size);
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.base.size < size);
		return 0;
	}

	ret = vmw_bo_create(res->dev_priv, res->backup_size,
			    res->func->backup_placement,
			    interruptible, false,
			    &vmw_bo_bo_free, &backup);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Transfer dirty regions.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}
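
/*
 * Lifecycle sketch (illustrative): a typical validation sequence pairs
 * vmw_resource_reserve() with vmw_resource_unreserve(), with the backup
 * bo reserved across the validate step. Error handling is elided and
 * dev_priv->cmdbuf_mutex is assumed to be held, as in vmw_resource_pin()
 * below.
 *
 *	vmw_resource_reserve(res, true, false);		// off the LRU list
 *	ttm_bo_reserve(&res->backup->base, true, false, NULL);
 *	vmw_resource_validate(res, true, false);	// create/bind if needed
 *	ttm_bo_unreserve(&res->backup->base);
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 *							// back on the LRU list
 */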

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/*
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}
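
/*
 * Pairing note (illustrative): vmw_resource_check_buffer() and
 * vmw_resource_backoff_reservation() bracket an eviction, mirroring the
 * generic ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation() pattern:
 *
 *	struct ttm_validate_buffer val_buf = { .bo = NULL, .num_shared = 0 };
 *
 *	ret = vmw_resource_check_buffer(ticket, res, true, &val_buf);
 *	if (ret)
 *		return ret;
 *	// ... unbind/destroy using val_buf ...
 *	vmw_resource_backoff_reservation(ticket, &val_buf);
 */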

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_resource_unbind_list - Unbind all resources attached to a mob
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time the affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}
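
/*
 * Note on the idiom above (illustrative): device commands are written by
 * reserving space in the command stream, filling in a typed header+body,
 * and committing. The same shape appears throughout this driver:
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdDXReadbackAllQuery body;
 *	} *cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_id);
 *
 *	if (!cmd)
 *		return -ENOMEM;
 *	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
 *	cmd->header.size = sizeof(cmd->body);
 *	cmd->body.cid = ctx_id;
 *	vmw_cmd_commit(dev_priv, sizeof(*cmd));	// visible to the device
 */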

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The memory region @bo is moving from.
 * @new_mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_resource *old_mem,
			   struct ttm_resource *new_mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	/* If BO is being moved from MOB to system memory */
	if (new_mem->mem_type == TTM_PL_SYSTEM &&
	    old_mem->mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
		if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
			mutex_unlock(&dev_priv->binding_mutex);
			return;
		}

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait-lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/*
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (ret)
				goto out_no_validate;
			if (!vbo->base.pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
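
/*
 * Usage sketch (illustrative): pin/unpin calls must balance. A resource
 * whose device id and backing MOB must stay stable across command
 * submissions is typically pinned once at setup and unpinned at
 * teardown:
 *
 *	ret = vmw_resource_pin(res, true);	// may sleep, interruptible
 *	if (ret)
 *		return ret;
 *	// ... resource id and backing MOB now stable ...
 *	vmw_resource_unpin(res);
 */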

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set the number of pages allowed to prefault and fence the
	 * buffer object.
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);

		return dma_resv_get_singleton(bo->base.resv, false,
					      &bo->moving);
	}

	return 0;
}
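
/*
 * Note (illustrative): the fencing at the end of vmw_resources_clean()
 * replaces the old read of the exclusive fence with a singleton fence
 * built from every fence on the reservation object, which is the point
 * of the "stop using dma_resv_excl_fence" change above. The equivalent
 * standalone pattern, assuming a reserved ttm_buffer_object *bo:
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = dma_resv_get_singleton(bo->base.resv, false, &fence);
 *	if (ret)
 *		return ret;	// could not compose the singleton fence
 *	dma_fence_put(bo->moving);
 *	bo->moving = fence;	// next fault waits on all current work
 */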