drm/ttm: device naming cleanup
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
        struct ttm_prime_object prime;
        struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}


/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->base.pin_count > 0)
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, placement, &ctx);

        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);

err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}
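
/*
 * Illustrative sketch, not part of the driver: a typical caller pins a
 * buffer into a placement for the duration of some setup work and then
 * drops the pin again. @dev_priv and @buf are assumed to exist in the
 * calling context; the helper named below is hypothetical.
 *
 *	ret = vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
 *				      true);
 *	if (ret)
 *		return ret;
 *	do_work_at_fixed_location(buf);		// hypothetical helper
 *	ret = vmw_bo_unpin(dev_priv, buf, false);
 */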


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->base.pin_count > 0) {
                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
                goto out_unreserve;
        }

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;

        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
        uint32_t new_flags;

        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->mem.num_pages;
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
        placement.busy_placement = &place;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->mem.num_pages &&
            bo->mem.start > 0 &&
            buf->base.pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }

        if (buf->base.pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, &placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->mem.start != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->mem.start << PAGE_SHIFT;
        } else {
                ptr->gmrId = bo->mem.start;
                ptr->offset = 0;
        }
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        if (pin == !!bo->pin_count)
                return;

        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.mem_type = bo->mem.mem_type;
        pl.flags = bo->mem.placement;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);

        if (pin)
                ttm_bo_pin(bo);
        else
                ttm_bo_unpin(bo);
}
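
/*
 * Illustrative sketch, not part of the driver: vmw_bo_pin_reserved()
 * asserts that the reservation lock is held, so a caller brackets it
 * with a reserve/unreserve pair. @vbo is assumed to exist in the
 * calling context.
 *
 *	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);	// pin without moving the buffer
 *	ttm_bo_unreserve(&vbo->base);
 */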

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on any of:
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
        struct ttm_buffer_object *bo = &vbo->base;
        bool not_used;
        void *virtual;
        int ret;

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
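
/*
 * Illustrative sketch, not part of the driver: because the map is
 * cached in @vbo, repeated calls are cheap; the caller must keep the
 * buffer pinned or reserved while the pointer is in use. @vbo and the
 * payload written below are hypothetical.
 *
 *	u32 *cpu_ptr = vmw_bo_map_and_cache(vbo);
 *
 *	if (!cpu_ptr)
 *		return -ENOMEM;
 *	cpu_ptr[0] = 0xdeadbeef;	// hypothetical payload
 *	// No unmap needed here; the cached map is torn down on move,
 *	// swapout or destruction, or explicitly via vmw_bo_unmap(vbo).
 */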


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
        if (vbo->map.bo == NULL)
                return;

        ttm_bo_kunmap(&vbo->map);
}


/**
 * vmw_bo_acc_size - Calculate the memory accounting size of a buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 * Return: The size to account for the buffer, including struct and
 * page-array overhead.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                              bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
                        TTM_OBJ_EXTRA_SIZE;
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        WARN_ON(vmw_bo->dirty);
        WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
        vmw_bo_unmap(vmw_bo);
        kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
        struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

        WARN_ON(vbo->dirty);
        WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
        vmw_bo_unmap(vbo);
        ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 *
 * Creates and pins a simple BO for in-kernel use.
 * Return: Zero on success, negative error code on failure.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
                         struct ttm_placement *placement,
                         struct ttm_buffer_object **p_bo)
{
        unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct ttm_operation_ctx ctx = { false, false };
        struct ttm_buffer_object *bo;
        size_t acc_size;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(!bo))
                return -ENOMEM;

        acc_size = ttm_round_pot(sizeof(*bo));
        acc_size += ttm_round_pot(npages * sizeof(void *));
        acc_size += ttm_round_pot(sizeof(struct ttm_tt));
        ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
                                   ttm_bo_type_device, placement, 0,
                                   &ctx, acc_size, NULL, NULL, NULL);
        if (unlikely(ret))
                goto error_free;

        ttm_bo_pin(bo);
        ttm_bo_unreserve(bo);
        *p_bo = bo;

        return 0;

error_free:
        kfree(bo);
        return ret;
}
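
/*
 * Illustrative sketch, not part of the driver: creating a small pinned
 * BO for kernel-internal data and releasing it again. @dev_priv is
 * assumed to exist in the calling context.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = vmw_bo_create_kernel(dev_priv, PAGE_SIZE,
 *				       &vmw_sys_placement, &bo);
 *	if (ret)
 *		return ret;
 *	// ... use the pinned, kernel-owned BO ...
 *	ttm_bo_unpin(bo);
 *	ttm_bo_put(bo);
 */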

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Return: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
                bool interruptible, bool pin,
                void (*bo_free)(struct ttm_buffer_object *bo))
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_bo_destroy);

        WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

        acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));
        BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
        vmw_bo->base.priority = 3;
        vmw_bo->res_tree = RB_ROOT;

        ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
                                   ttm_bo_type_device, placement,
                                   0, &ctx, acc_size, NULL, NULL, bo_free);
        if (unlikely(ret))
                return ret;

        if (pin)
                ttm_bo_pin(&vmw_bo->base);
        ttm_bo_unreserve(&vmw_bo->base);
        return 0;
}


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        ttm_bo_put(&vmw_user_bo->vbo.base);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
                                        enum ttm_ref_type ref_type)
{
        struct vmw_user_buffer_object *user_bo;

        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                atomic_dec(&user_bo->vbo.cpu_writers);
                break;
        default:
                WARN_ONCE(true, "Undefined buffer object reference release.\n");
        }
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the newly created TTM base object
 * should be placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t size,
                      bool shareable,
                      uint32_t *handle,
                      struct vmw_buffer_object **p_vbo,
                      struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *user_bo;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                          (dev_priv->has_mob) ?
                          &vmw_sys_placement :
                          &vmw_vram_sys_placement, true, false,
                          &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;

        ttm_bo_get(&user_bo->vbo.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_bo_release,
                                    &vmw_user_bo_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_put(&user_bo->vbo.base);
                goto out_no_base_object;
        }

        *p_vbo = &user_bo->vbo;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.handle;

out_no_base_object:
        return ret;
}


/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                              struct ttm_object_file *tfile)
{
        struct vmw_user_buffer_object *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_bo_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_buffer_object(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                    struct ttm_object_file *tfile,
                                    uint32_t flags)
{
        bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
        struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                long lret;

                lret = dma_resv_wait_timeout_rcu
                        (bo->base.resv, true, true,
                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_reserve(bo, true, nonblock, NULL);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_wait(bo, true, nonblock);
        if (likely(ret == 0))
                atomic_inc(&user_bo->vbo.cpu_writers);

        ttm_bo_unreserve(bo);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                atomic_dec(&user_bo->vbo.cpu_writers);

        return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 * Return: Zero on success, negative error code on error.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
                                       struct ttm_object_file *tfile,
                                       uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}
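
/*
 * Illustrative user-space sketch, not part of the driver: a synccpu
 * grab/release pair bracketing CPU access, using the enums from
 * include/uapi/drm/vmwgfx_drm.h and libdrm's drmCommandWrite(). The fd
 * and buffer handle are assumed to exist; error handling is omitted.
 *
 *	struct drm_vmw_synccpu_arg arg = { 0 };
 *
 *	arg.handle = bo_handle;
 *	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
 *	arg.op = drm_vmw_synccpu_grab;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	// ... CPU reads/writes to the mapped buffer ...
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */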


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_buffer_object *vbo;
        struct vmw_user_buffer_object *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                         &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(vbo, struct vmw_user_buffer_object,
                                       vbo);
                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_bo_unreference(&vbo);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_buffer_object *vbo;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                req->size, false, &handle, &vbo,
                                NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_bo_unreference(&vbo);

out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
                       uint32_t handle, struct vmw_buffer_object **out,
                       struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        ttm_bo_get(&vmw_user_bo->vbo.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->vbo;

        return 0;
}

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without
 * refcounting it.
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). If this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
 * scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-ESRCH);
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_noref_release();
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        return &vmw_user_bo->vbo;
}
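
/*
 * Illustrative sketch, not part of the driver: the noref lookup must be
 * paired with vmw_user_bo_noref_release(), with no sleeping or
 * scheduling calls in between. @tfile and @handle are assumed to exist
 * in the calling context.
 *
 *	struct vmw_buffer_object *vbo =
 *		vmw_user_bo_noref_lookup(tfile, handle);
 *
 *	if (IS_ERR(vbo))
 *		return PTR_ERR(vbo);
 *	// ... short, non-sleeping use of vbo ...
 *	vmw_user_bo_noref_release();
 */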

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
                          struct vmw_buffer_object *vbo,
                          uint32_t *handle)
{
        struct vmw_user_buffer_object *user_bo;

        if (vbo->base.destroy != vmw_user_bo_destroy)
                return -EINVAL;

        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

        *handle = user_bo->prime.base.handle;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_device *bdev = bo->bdev;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                dma_resv_add_excl_fence(bo->base.resv, &fence->base);
                dma_fence_put(&fence->base);
        } else {
                dma_resv_add_excl_fence(bo->base.resv, &fence->base);
        }
}
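
/*
 * Illustrative sketch, not part of the driver: fencing a reserved
 * buffer after command submission. Unlike the ttm_eu helpers,
 * unreserving is a separate step. @bo is assumed to be reserved and to
 * have just been referenced by submitted commands.
 *
 *	vmw_bo_fence_single(bo, NULL);	// NULL: create and attach a new fence
 *	ttm_bo_unreserve(bo);
 */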


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_buffer_object *vbo;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                args->size, false, &args->handle,
                                &vbo, NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        vmw_bo_unreference(&vbo);
out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_buffer_object *out_buf;
        int ret;

        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
        vmw_bo_unreference(&out_buf);
        return 0;
}


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_resource *mem)
{
        struct vmw_buffer_object *vbo;

        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        vbo = container_of(bo, struct vmw_buffer_object, base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure we
         * read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}