Merge tag 'mvebu-arm-5.2-1' of git://git.infradead.org/linux-mvebu into arm/late
[linux-2.6-microblaze.git] / drivers / gpu / drm / vmwgfx / vmwgfx_surface.c
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27
28 #include <drm/ttm/ttm_placement.h>
29
30 #include "vmwgfx_drv.h"
31 #include "vmwgfx_resource_priv.h"
32 #include "vmwgfx_so.h"
33 #include "vmwgfx_binding.h"
34 #include "device_include/svga3d_surfacedefs.h"
35
/*
 * Helpers to combine/split the driver's internal 64-bit surface flags
 * into the two 32-bit halves used by the device protocol.
 * Arguments are fully parenthesized so callers may pass expressions
 * without operator-precedence surprises (e.g. the cast in
 * SVGA3D_FLAGS_64() must apply to the whole upper32 expression).
 */
#define SVGA3D_FLAGS_64(upper32, lower32) \
	(((uint64_t)(upper32) << 32) | (lower32))
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) \
	((svga3d_flags) >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	((svga3d_flags) & ((uint64_t)U32_MAX))
40
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master: master of the creating client. Used for security check.
 * @backup_base:    TTM base object of the backup buffer, if one was
 *                  allocated for this surface; NULL otherwise.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
	struct ttm_base_object *backup_base;
};
56
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 *
 * Used when encoding per-mip-level surface DMA commands to locate each
 * image within the linear backing store.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
70
71 static void vmw_user_surface_free(struct vmw_resource *res);
72 static struct vmw_resource *
73 vmw_user_surface_base_to_res(struct ttm_base_object *base);
74 static int vmw_legacy_srf_bind(struct vmw_resource *res,
75                                struct ttm_validate_buffer *val_buf);
76 static int vmw_legacy_srf_unbind(struct vmw_resource *res,
77                                  bool readback,
78                                  struct ttm_validate_buffer *val_buf);
79 static int vmw_legacy_srf_create(struct vmw_resource *res);
80 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
81 static int vmw_gb_surface_create(struct vmw_resource *res);
82 static int vmw_gb_surface_bind(struct vmw_resource *res,
83                                struct ttm_validate_buffer *val_buf);
84 static int vmw_gb_surface_unbind(struct vmw_resource *res,
85                                  bool readback,
86                                  struct ttm_validate_buffer *val_buf);
87 static int vmw_gb_surface_destroy(struct vmw_resource *res);
88 static int
89 vmw_gb_surface_define_internal(struct drm_device *dev,
90                                struct drm_vmw_gb_surface_create_ext_req *req,
91                                struct drm_vmw_gb_surface_create_rep *rep,
92                                struct drm_file *file_priv);
93 static int
94 vmw_gb_surface_reference_internal(struct drm_device *dev,
95                                   struct drm_vmw_surface_arg *req,
96                                   struct drm_vmw_gb_surface_ref_ext_rep *rep,
97                                   struct drm_file *file_priv);
98
/*
 * Conversion table letting generic user-resource lookup code translate
 * a user-visible TTM base object into the embedded surface resource.
 */
static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


/*
 * TTM accounting size of a struct vmw_user_surface (plus bookkeeping
 * overhead); computed lazily on the first surface define ioctl.
 */
static uint64_t vmw_user_surface_size;
110
/* Resource operations for legacy (non guest-backed) surfaces. */
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

/* Resource operations for guest-backed (MOB-placed) surfaces. */
static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};
134
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * Complete per-mip-level DMA command as encoded into the fifo:
 * header, body, a single copy box and the DMA suffix.
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * Fixed-size part of the command; a variable-length array of
 * SVGA3dSize entries follows it directly in the fifo.
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
160
161
162 /**
163  * vmw_surface_dma_size - Compute fifo size for a dma command.
164  *
165  * @srf: Pointer to a struct vmw_surface
166  *
167  * Computes the required size for a surface dma command for backup or
168  * restoration of the surface represented by @srf.
169  */
170 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
171 {
172         return srf->num_sizes * sizeof(struct vmw_surface_dma);
173 }
174
175
176 /**
177  * vmw_surface_define_size - Compute fifo size for a surface define command.
178  *
179  * @srf: Pointer to a struct vmw_surface
180  *
181  * Computes the required size for a surface define command for the definition
182  * of the surface represented by @srf.
183  */
184 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
185 {
186         return sizeof(struct vmw_surface_define) + srf->num_sizes *
187                 sizeof(SVGA3dSize);
188 }
189
190
191 /**
192  * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
193  *
194  * Computes the required size for a surface destroy command for the destruction
195  * of a hw surface.
196  */
197 static inline uint32_t vmw_surface_destroy_size(void)
198 {
199         return sizeof(struct vmw_surface_destroy);
200 }
201
202 /**
203  * vmw_surface_destroy_encode - Encode a surface_destroy command.
204  *
205  * @id: The surface id
206  * @cmd_space: Pointer to memory area in which the commands should be encoded.
207  */
208 static void vmw_surface_destroy_encode(uint32_t id,
209                                        void *cmd_space)
210 {
211         struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
212                 cmd_space;
213
214         cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
215         cmd->header.size = sizeof(cmd->body);
216         cmd->body.sid = id;
217 }
218
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 *
 * @cmd_space must be at least vmw_surface_define_size(@srf) bytes:
 * the fixed command is followed by one SVGA3dSize per mip level.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	/* Header size excludes the header itself but includes the
	 * trailing size array. */
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	/*
	 * Downcast of surfaceFlags, was upcasted when received from user-space,
	 * since driver internally stores as 64 bit.
	 * For legacy surface define only 32 bit flag is supported.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
	cmd->body.format = srf->format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	/* The SVGA3dSize array starts immediately after the fixed-size
	 * command structure. */
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
260
261 /**
262  * vmw_surface_dma_encode - Encode a surface_dma command.
263  *
264  * @srf: Pointer to a struct vmw_surface object.
265  * @cmd_space: Pointer to memory area in which the commands should be encoded.
266  * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
267  * should be placed or read from.
268  * @to_surface: Boolean whether to DMA to the surface or from the surface.
269  */
270 static void vmw_surface_dma_encode(struct vmw_surface *srf,
271                                    void *cmd_space,
272                                    const SVGAGuestPtr *ptr,
273                                    bool to_surface)
274 {
275         uint32_t i;
276         struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
277         const struct svga3d_surface_desc *desc =
278                 svga3dsurface_get_desc(srf->format);
279
280         for (i = 0; i < srf->num_sizes; ++i) {
281                 SVGA3dCmdHeader *header = &cmd->header;
282                 SVGA3dCmdSurfaceDMA *body = &cmd->body;
283                 SVGA3dCopyBox *cb = &cmd->cb;
284                 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
285                 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
286                 const struct drm_vmw_size *cur_size = &srf->sizes[i];
287
288                 header->id = SVGA_3D_CMD_SURFACE_DMA;
289                 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
290
291                 body->guest.ptr = *ptr;
292                 body->guest.ptr.offset += cur_offset->bo_offset;
293                 body->guest.pitch = svga3dsurface_calculate_pitch(desc,
294                                                                   cur_size);
295                 body->host.sid = srf->res.id;
296                 body->host.face = cur_offset->face;
297                 body->host.mipmap = cur_offset->mip;
298                 body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
299                                   SVGA3D_READ_HOST_VRAM);
300                 cb->x = 0;
301                 cb->y = 0;
302                 cb->z = 0;
303                 cb->srcx = 0;
304                 cb->srcy = 0;
305                 cb->srcz = 0;
306                 cb->w = cur_size->width;
307                 cb->h = cur_size->height;
308                 cb->d = cur_size->depth;
309
310                 suffix->suffixSize = sizeof(*suffix);
311                 suffix->maximumOffset =
312                         svga3dsurface_get_image_buffer_size(desc, cur_size,
313                                                             body->guest.pitch);
314                 suffix->flags.discard = 0;
315                 suffix->flags.unsynchronized = 0;
316                 suffix->flags.reserved = 0;
317                 ++cmd;
318         }
319 };
320
321
322 /**
323  * vmw_hw_surface_destroy - destroy a Device surface
324  *
325  * @res:        Pointer to a struct vmw_resource embedded in a struct
326  *              vmw_surface.
327  *
328  * Destroys a the device surface associated with a struct vmw_surface if
329  * any, and adjusts accounting and resource count accordingly.
330  */
331 static void vmw_hw_surface_destroy(struct vmw_resource *res)
332 {
333
334         struct vmw_private *dev_priv = res->dev_priv;
335         struct vmw_surface *srf;
336         void *cmd;
337
338         if (res->func->destroy == vmw_gb_surface_destroy) {
339                 (void) vmw_gb_surface_destroy(res);
340                 return;
341         }
342
343         if (res->id != -1) {
344
345                 cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
346                 if (unlikely(!cmd))
347                         return;
348
349                 vmw_surface_destroy_encode(res->id, cmd);
350                 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
351
352                 /*
353                  * used_memory_size_atomic, or separate lock
354                  * to avoid taking dev_priv::cmdbuf_mutex in
355                  * the destroy path.
356                  */
357
358                 mutex_lock(&dev_priv->cmdbuf_mutex);
359                 srf = vmw_res_to_srf(res);
360                 dev_priv->used_memory_size -= res->backup_size;
361                 mutex_unlock(&dev_priv->cmdbuf_mutex);
362         }
363 }
364
/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 *       vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one and encode a
 * surface define command into the fifo.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	/* Already has a hw id - nothing to do. */
	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	/* Refuse creation if it would exceed the device surface memory. */
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
436
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	/* Return value intentionally ignored; on failure @fence may be
	 * NULL, which is handled below. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
489
490 /**
491  * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
492  *                       surface validation process.
493  *
494  * @res:            Pointer to a struct vmw_res embedded in a struct
495  *                  vmw_surface.
496  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
497  *                  information about the backup buffer.
498  *
499  * This function will copy backup data to the surface if the
500  * backup buffer is dirty.
501  */
502 static int vmw_legacy_srf_bind(struct vmw_resource *res,
503                                struct ttm_validate_buffer *val_buf)
504 {
505         if (!res->backup_dirty)
506                 return 0;
507
508         return vmw_legacy_srf_dma(res, val_buf, true);
509 }
510
511
512 /**
513  * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
514  *                         surface eviction process.
515  *
516  * @res:            Pointer to a struct vmw_res embedded in a struct
517  *                  vmw_surface.
518  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
519  *                  information about the backup buffer.
520  *
521  * This function will copy backup data from the surface.
522  */
523 static int vmw_legacy_srf_unbind(struct vmw_resource *res,
524                                  bool readback,
525                                  struct ttm_validate_buffer *val_buf)
526 {
527         if (unlikely(readback))
528                 return vmw_legacy_srf_dma(res, val_buf, false);
529         return 0;
530 }
531
/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_res embedded in a struct
 *                  vmw_surface.
 *
 * Returns 0 on success, or -ENOMEM if fifo space could not be reserved
 * for the destroy command. The caller must guarantee the surface has a
 * hw id (res->id != -1).
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the dma- and surface destroy commands.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
574
575
/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 *
 * Chooses guest-backed or legacy surface resource operations based on
 * whether the device has MOB support. Note that on initialization
 * failure, @res_free is called on the embedded resource before the
 * error is returned - the caller must not free @srf again.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		/* Ownership of @srf passes to the destructor here. */
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}
610
611 /**
612  * vmw_user_surface_base_to_res - TTM base object to resource converter for
613  *                                user visible surfaces
614  *
615  * @base:           Pointer to a TTM base object
616  *
617  * Returns the struct vmw_resource embedded in a struct vmw_surface
618  * for the user-visible object identified by the TTM base object @base.
619  */
620 static struct vmw_resource *
621 vmw_user_surface_base_to_res(struct ttm_base_object *base)
622 {
623         return &(container_of(base, struct vmw_user_surface,
624                               prime.base)->srf.res);
625 }
626
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 *
 * Frees the surface metadata and the containing struct vmw_user_surface,
 * and releases the TTM memory accounting reserved for it.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	/* Read the accounting size before the object is freed below. */
	uint32_t size = user_srf->size;

	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	/* Frees user_srf itself - don't touch it after this. */
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
648
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	if (user_srf->backup_base)
		ttm_base_object_unref(&user_srf->backup_base);
	vmw_resource_unreference(&res);
}
670
671 /**
672  * vmw_user_surface_destroy_ioctl - Ioctl function implementing
673  *                                  the user surface destroy functionality.
674  *
675  * @dev:            Pointer to a struct drm_device.
676  * @data:           Pointer to data copied from / to user-space.
677  * @file_priv:      Pointer to a drm file private structure.
678  */
679 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
680                               struct drm_file *file_priv)
681 {
682         struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
683         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
684
685         return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
686 }
687
688 /**
689  * vmw_user_surface_define_ioctl - Ioctl function implementing
690  *                                  the user surface define functionality.
691  *
692  * @dev:            Pointer to a struct drm_device.
693  * @data:           Pointer to data copied from / to user-space.
694  * @file_priv:      Pointer to a drm file private structure.
695  */
696 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
697                              struct drm_file *file_priv)
698 {
699         struct vmw_private *dev_priv = vmw_priv(dev);
700         struct vmw_user_surface *user_srf;
701         struct vmw_surface *srf;
702         struct vmw_resource *res;
703         struct vmw_resource *tmp;
704         union drm_vmw_surface_create_arg *arg =
705             (union drm_vmw_surface_create_arg *)data;
706         struct drm_vmw_surface_create_req *req = &arg->req;
707         struct drm_vmw_surface_arg *rep = &arg->rep;
708         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
709         struct ttm_operation_ctx ctx = {
710                 .interruptible = true,
711                 .no_wait_gpu = false
712         };
713         int ret;
714         int i, j;
715         uint32_t cur_bo_offset;
716         struct drm_vmw_size *cur_size;
717         struct vmw_surface_offset *cur_offset;
718         uint32_t num_sizes;
719         uint32_t size;
720         const struct svga3d_surface_desc *desc;
721
722         if (unlikely(vmw_user_surface_size == 0))
723                 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
724                         VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
725
726         num_sizes = 0;
727         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
728                 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
729                         return -EINVAL;
730                 num_sizes += req->mip_levels[i];
731         }
732
733         if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
734             num_sizes == 0)
735                 return -EINVAL;
736
737         size = vmw_user_surface_size +
738                 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
739                 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
740
741         desc = svga3dsurface_get_desc(req->format);
742         if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
743                 VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
744                                req->format);
745                 return -EINVAL;
746         }
747
748         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
749         if (unlikely(ret != 0))
750                 return ret;
751
752         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
753                                    size, &ctx);
754         if (unlikely(ret != 0)) {
755                 if (ret != -ERESTARTSYS)
756                         DRM_ERROR("Out of graphics memory for surface.\n");
757                 goto out_unlock;
758         }
759
760         user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
761         if (unlikely(!user_srf)) {
762                 ret = -ENOMEM;
763                 goto out_no_user_srf;
764         }
765
766         srf = &user_srf->srf;
767         res = &srf->res;
768
769         /* Driver internally stores as 64-bit flags */
770         srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
771         srf->format = req->format;
772         srf->scanout = req->scanout;
773
774         memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
775         srf->num_sizes = num_sizes;
776         user_srf->size = size;
777         srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
778                                  req->size_addr,
779                                  sizeof(*srf->sizes) * srf->num_sizes);
780         if (IS_ERR(srf->sizes)) {
781                 ret = PTR_ERR(srf->sizes);
782                 goto out_no_sizes;
783         }
784         srf->offsets = kmalloc_array(srf->num_sizes,
785                                      sizeof(*srf->offsets),
786                                      GFP_KERNEL);
787         if (unlikely(!srf->offsets)) {
788                 ret = -ENOMEM;
789                 goto out_no_offsets;
790         }
791
792         srf->base_size = *srf->sizes;
793         srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
794         srf->multisample_count = 0;
795         srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
796         srf->quality_level = SVGA3D_MS_QUALITY_NONE;
797
798         cur_bo_offset = 0;
799         cur_offset = srf->offsets;
800         cur_size = srf->sizes;
801
802         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
803                 for (j = 0; j < srf->mip_levels[i]; ++j) {
804                         uint32_t stride = svga3dsurface_calculate_pitch
805                                 (desc, cur_size);
806
807                         cur_offset->face = i;
808                         cur_offset->mip = j;
809                         cur_offset->bo_offset = cur_bo_offset;
810                         cur_bo_offset += svga3dsurface_get_image_buffer_size
811                                 (desc, cur_size, stride);
812                         ++cur_offset;
813                         ++cur_size;
814                 }
815         }
816         res->backup_size = cur_bo_offset;
817         if (srf->scanout &&
818             srf->num_sizes == 1 &&
819             srf->sizes[0].width == 64 &&
820             srf->sizes[0].height == 64 &&
821             srf->format == SVGA3D_A8R8G8B8) {
822
823                 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
824                 if (!srf->snooper.image) {
825                         DRM_ERROR("Failed to allocate cursor_image\n");
826                         ret = -ENOMEM;
827                         goto out_no_copy;
828                 }
829         } else {
830                 srf->snooper.image = NULL;
831         }
832
833         user_srf->prime.base.shareable = false;
834         user_srf->prime.base.tfile = NULL;
835         if (drm_is_primary_client(file_priv))
836                 user_srf->master = drm_master_get(file_priv->master);
837
838         /**
839          * From this point, the generic resource management functions
840          * destroy the object on failure.
841          */
842
843         ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
844         if (unlikely(ret != 0))
845                 goto out_unlock;
846
847         /*
848          * A gb-aware client referencing a shared surface will
849          * expect a backup buffer to be present.
850          */
851         if (dev_priv->has_mob && req->shareable) {
852                 uint32_t backup_handle;
853
854                 ret = vmw_user_bo_alloc(dev_priv, tfile,
855                                         res->backup_size,
856                                         true,
857                                         &backup_handle,
858                                         &res->backup,
859                                         &user_srf->backup_base);
860                 if (unlikely(ret != 0)) {
861                         vmw_resource_unreference(&res);
862                         goto out_unlock;
863                 }
864         }
865
866         tmp = vmw_resource_reference(&srf->res);
867         ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
868                                     req->shareable, VMW_RES_SURFACE,
869                                     &vmw_user_surface_base_release, NULL);
870
871         if (unlikely(ret != 0)) {
872                 vmw_resource_unreference(&tmp);
873                 vmw_resource_unreference(&res);
874                 goto out_unlock;
875         }
876
877         rep->sid = user_srf->prime.base.handle;
878         vmw_resource_unreference(&res);
879
880         ttm_read_unlock(&dev_priv->reservation_sem);
881         return 0;
882 out_no_copy:
883         kfree(srf->offsets);
884 out_no_offsets:
885         kfree(srf->sizes);
886 out_no_sizes:
887         ttm_prime_object_kfree(user_srf, prime);
888 out_no_user_srf:
889         ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
890 out_unlock:
891         ttm_read_unlock(&dev_priv->reservation_sem);
892         return ret;
893 }
894
895
/**
 * vmw_surface_handle_reference - Look up a surface by a user-space handle
 * and add a usage reference for the calling client.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to the calling drm file private structure.
 * @u_handle: The user-space handle: a prime fd when @handle_type is
 * DRM_VMW_HANDLE_PRIME, otherwise a legacy TTM object handle.
 * @handle_type: The type of @u_handle.
 * @base_p: On successful return, assigned the referenced base object.
 *
 * Returns 0 on success, or a negative error code on failure.
 */
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
                             struct drm_file *file_priv,
                             uint32_t u_handle,
                             enum drm_vmw_handle_type handle_type,
                             struct ttm_base_object **base_p)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_surface *user_srf;
        uint32_t handle;
        struct ttm_base_object *base;
        int ret;
        bool require_exist = false;

        if (handle_type == DRM_VMW_HANDLE_PRIME) {
                /* Translate the prime fd into a local TTM handle. */
                ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
                if (unlikely(ret != 0))
                        return ret;
        } else {
                /*
                 * Render clients may only reference surfaces already
                 * registered with them (enforced by ttm_ref_object_add
                 * below via require_exist).
                 */
                if (unlikely(drm_is_render_client(file_priv)))
                        require_exist = true;

                if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
                        DRM_ERROR("Locked master refused legacy "
                                  "surface reference.\n");
                        return -EACCES;
                }

                handle = u_handle;
        }

        ret = -EINVAL;
        base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
        if (unlikely(!base)) {
                VMW_DEBUG_USER("Could not find surface to reference.\n");
                goto out_no_lookup;
        }

        if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
                VMW_DEBUG_USER("Referenced object is not a surface.\n");
                goto out_bad_resource;
        }

        if (handle_type != DRM_VMW_HANDLE_PRIME) {
                user_srf = container_of(base, struct vmw_user_surface,
                                        prime.base);

                /*
                 * Make sure the surface creator has the same
                 * authenticating master, or is already registered with us.
                 */
                if (drm_is_primary_client(file_priv) &&
                    user_srf->master != file_priv->master)
                        require_exist = true;

                ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
                                         require_exist);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not add a reference to a surface.\n");
                        goto out_bad_resource;
                }
        }

        *base_p = base;
        return 0;

out_bad_resource:
        ttm_base_object_unref(&base);
out_no_lookup:
        /*
         * On the prime path a handle reference was taken above
         * (presumably by ttm_prime_fd_to_handle — see that API); drop
         * it on error.
         */
        if (handle_type == DRM_VMW_HANDLE_PRIME)
                (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

        return ret;
}
970
971 /**
972  * vmw_user_surface_define_ioctl - Ioctl function implementing
973  *                                  the user surface reference functionality.
974  *
975  * @dev:            Pointer to a struct drm_device.
976  * @data:           Pointer to data copied from / to user-space.
977  * @file_priv:      Pointer to a drm file private structure.
978  */
979 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
980                                 struct drm_file *file_priv)
981 {
982         struct vmw_private *dev_priv = vmw_priv(dev);
983         union drm_vmw_surface_reference_arg *arg =
984             (union drm_vmw_surface_reference_arg *)data;
985         struct drm_vmw_surface_arg *req = &arg->req;
986         struct drm_vmw_surface_create_req *rep = &arg->rep;
987         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
988         struct vmw_surface *srf;
989         struct vmw_user_surface *user_srf;
990         struct drm_vmw_size __user *user_sizes;
991         struct ttm_base_object *base;
992         int ret;
993
994         ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
995                                            req->handle_type, &base);
996         if (unlikely(ret != 0))
997                 return ret;
998
999         user_srf = container_of(base, struct vmw_user_surface, prime.base);
1000         srf = &user_srf->srf;
1001
1002         /* Downcast of flags when sending back to user space */
1003         rep->flags = (uint32_t)srf->flags;
1004         rep->format = srf->format;
1005         memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
1006         user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1007             rep->size_addr;
1008
1009         if (user_sizes)
1010                 ret = copy_to_user(user_sizes, &srf->base_size,
1011                                    sizeof(srf->base_size));
1012         if (unlikely(ret != 0)) {
1013                 VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
1014                                srf->num_sizes);
1015                 ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
1016                 ret = -EFAULT;
1017         }
1018
1019         ttm_base_object_unref(&base);
1020
1021         return ret;
1022 }
1023
1024 /**
1025  * vmw_surface_define_encode - Encode a surface_define command.
1026  *
1027  * @srf: Pointer to a struct vmw_surface object.
1028  * @cmd_space: Pointer to memory area in which the commands should be encoded.
1029  */
1030 static int vmw_gb_surface_create(struct vmw_resource *res)
1031 {
1032         struct vmw_private *dev_priv = res->dev_priv;
1033         struct vmw_surface *srf = vmw_res_to_srf(res);
1034         uint32_t cmd_len, cmd_id, submit_len;
1035         int ret;
1036         struct {
1037                 SVGA3dCmdHeader header;
1038                 SVGA3dCmdDefineGBSurface body;
1039         } *cmd;
1040         struct {
1041                 SVGA3dCmdHeader header;
1042                 SVGA3dCmdDefineGBSurface_v2 body;
1043         } *cmd2;
1044         struct {
1045                 SVGA3dCmdHeader header;
1046                 SVGA3dCmdDefineGBSurface_v3 body;
1047         } *cmd3;
1048
1049         if (likely(res->id != -1))
1050                 return 0;
1051
1052         vmw_fifo_resource_inc(dev_priv);
1053         ret = vmw_resource_alloc_id(res);
1054         if (unlikely(ret != 0)) {
1055                 DRM_ERROR("Failed to allocate a surface id.\n");
1056                 goto out_no_id;
1057         }
1058
1059         if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
1060                 ret = -EBUSY;
1061                 goto out_no_fifo;
1062         }
1063
1064         if (dev_priv->has_sm4_1 && srf->array_size > 0) {
1065                 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
1066                 cmd_len = sizeof(cmd3->body);
1067                 submit_len = sizeof(*cmd3);
1068         } else if (srf->array_size > 0) {
1069                 /* has_dx checked on creation time. */
1070                 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
1071                 cmd_len = sizeof(cmd2->body);
1072                 submit_len = sizeof(*cmd2);
1073         } else {
1074                 cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
1075                 cmd_len = sizeof(cmd->body);
1076                 submit_len = sizeof(*cmd);
1077         }
1078
1079         cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
1080         cmd2 = (typeof(cmd2))cmd;
1081         cmd3 = (typeof(cmd3))cmd;
1082         if (unlikely(!cmd)) {
1083                 ret = -ENOMEM;
1084                 goto out_no_fifo;
1085         }
1086
1087         if (dev_priv->has_sm4_1 && srf->array_size > 0) {
1088                 cmd3->header.id = cmd_id;
1089                 cmd3->header.size = cmd_len;
1090                 cmd3->body.sid = srf->res.id;
1091                 cmd3->body.surfaceFlags = srf->flags;
1092                 cmd3->body.format = srf->format;
1093                 cmd3->body.numMipLevels = srf->mip_levels[0];
1094                 cmd3->body.multisampleCount = srf->multisample_count;
1095                 cmd3->body.multisamplePattern = srf->multisample_pattern;
1096                 cmd3->body.qualityLevel = srf->quality_level;
1097                 cmd3->body.autogenFilter = srf->autogen_filter;
1098                 cmd3->body.size.width = srf->base_size.width;
1099                 cmd3->body.size.height = srf->base_size.height;
1100                 cmd3->body.size.depth = srf->base_size.depth;
1101                 cmd3->body.arraySize = srf->array_size;
1102         } else if (srf->array_size > 0) {
1103                 cmd2->header.id = cmd_id;
1104                 cmd2->header.size = cmd_len;
1105                 cmd2->body.sid = srf->res.id;
1106                 cmd2->body.surfaceFlags = srf->flags;
1107                 cmd2->body.format = srf->format;
1108                 cmd2->body.numMipLevels = srf->mip_levels[0];
1109                 cmd2->body.multisampleCount = srf->multisample_count;
1110                 cmd2->body.autogenFilter = srf->autogen_filter;
1111                 cmd2->body.size.width = srf->base_size.width;
1112                 cmd2->body.size.height = srf->base_size.height;
1113                 cmd2->body.size.depth = srf->base_size.depth;
1114                 cmd2->body.arraySize = srf->array_size;
1115         } else {
1116                 cmd->header.id = cmd_id;
1117                 cmd->header.size = cmd_len;
1118                 cmd->body.sid = srf->res.id;
1119                 cmd->body.surfaceFlags = srf->flags;
1120                 cmd->body.format = srf->format;
1121                 cmd->body.numMipLevels = srf->mip_levels[0];
1122                 cmd->body.multisampleCount = srf->multisample_count;
1123                 cmd->body.autogenFilter = srf->autogen_filter;
1124                 cmd->body.size.width = srf->base_size.width;
1125                 cmd->body.size.height = srf->base_size.height;
1126                 cmd->body.size.depth = srf->base_size.depth;
1127         }
1128
1129         vmw_fifo_commit(dev_priv, submit_len);
1130
1131         return 0;
1132
1133 out_no_fifo:
1134         vmw_resource_release_id(res);
1135 out_no_id:
1136         vmw_fifo_resource_dec(dev_priv);
1137         return ret;
1138 }
1139
1140
/**
 * vmw_gb_surface_bind - Bind a guest-backed surface to its MOB backup
 * buffer, optionally issuing an update command first use.
 *
 * @res: Pointer to the surface resource.
 * @val_buf: Validation buffer holding the backup buffer object, which
 * must reside in MOB memory.
 *
 * If the backup is dirty, an UPDATE_GB_SURFACE command is appended so
 * the device re-reads the backup contents. Returns 0 on success,
 * -ENOMEM if FIFO space could not be reserved.
 */
static int vmw_gb_surface_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBSurface body;
        } *cmd2;
        uint32_t submit_size;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        /* Reserve room for the optional update command in one go. */
        submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

        cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
        if (unlikely(!cmd1))
                return -ENOMEM;

        cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
        cmd1->header.size = sizeof(cmd1->body);
        cmd1->body.sid = res->id;
        cmd1->body.mobid = bo->mem.start;
        if (res->backup_dirty) {
                /* The update command immediately follows the bind command. */
                cmd2 = (void *) &cmd1[1];
                cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
                cmd2->header.size = sizeof(cmd2->body);
                cmd2->body.sid = res->id;
                res->backup_dirty = false;
        }
        vmw_fifo_commit(dev_priv, submit_size);

        return 0;
}
1179
/**
 * vmw_gb_surface_unbind - Unbind a guest-backed surface from its backup
 * buffer, optionally reading back the surface contents first.
 *
 * @res: Pointer to the surface resource.
 * @readback: If true, issue a READBACK_GB_SURFACE command so the surface
 * contents are written to the backup buffer before unbinding; otherwise
 * invalidate the surface.
 * @val_buf: Validation buffer holding the backup buffer object, which
 * must reside in MOB memory.
 *
 * The backup buffer is fenced so it is not reused before the device has
 * finished. Returns 0 on success, -ENOMEM if FIFO space could not be
 * reserved.
 */
static int vmw_gb_surface_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBSurface body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdInvalidateGBSurface body;
        } *cmd2;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd3;
        uint32_t submit_size;
        uint8_t *cmd;


        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        /* Either a readback or an invalidate precedes the unbind. */
        submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
        cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
        if (unlikely(!cmd))
                return -ENOMEM;

        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.sid = res->id;
                cmd3 = (void *) &cmd1[1];
        } else {
                cmd2 = (void *) cmd;
                cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
                cmd2->header.size = sizeof(cmd2->body);
                cmd2->body.sid = res->id;
                cmd3 = (void *) &cmd2[1];
        }

        /* Binding to SVGA3D_INVALID_ID detaches the surface from its MOB. */
        cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
        cmd3->header.size = sizeof(cmd3->body);
        cmd3->body.sid = res->id;
        cmd3->body.mobid = SVGA3D_INVALID_ID;

        vmw_fifo_commit(dev_priv, submit_size);

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_bo_fence_single(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
1246
/**
 * vmw_gb_surface_destroy - Destroy a guest-backed surface on the device.
 *
 * @res: Pointer to the surface resource.
 *
 * Scrubs views and bindings referring to the surface before submitting
 * the destroy command, then releases the surface id. Returns 0 on
 * success, -ENOMEM if FIFO space could not be reserved.
 */
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBSurface body;
        } *cmd;

        /* Not defined on the device: nothing to destroy. */
        if (likely(res->id == -1))
                return 0;

        /* Views/bindings must be scrubbed under the binding mutex. */
        mutex_lock(&dev_priv->binding_mutex);
        vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
        vmw_binding_res_list_scrub(&res->binding_head);

        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (unlikely(!cmd)) {
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.sid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}
1279
1280
1281 /**
1282  * vmw_gb_surface_define_ioctl - Ioctl function implementing
1283  * the user surface define functionality.
1284  *
1285  * @dev: Pointer to a struct drm_device.
1286  * @data: Pointer to data copied from / to user-space.
1287  * @file_priv: Pointer to a drm file private structure.
1288  */
1289 int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1290                                 struct drm_file *file_priv)
1291 {
1292         union drm_vmw_gb_surface_create_arg *arg =
1293             (union drm_vmw_gb_surface_create_arg *)data;
1294         struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
1295         struct drm_vmw_gb_surface_create_ext_req req_ext;
1296
1297         req_ext.base = arg->req;
1298         req_ext.version = drm_vmw_gb_surface_v1;
1299         req_ext.svga3d_flags_upper_32_bits = 0;
1300         req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
1301         req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
1302         req_ext.must_be_zero = 0;
1303
1304         return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
1305 }
1306
1307 /**
1308  * vmw_gb_surface_reference_ioctl - Ioctl function implementing
1309  * the user surface reference functionality.
1310  *
1311  * @dev: Pointer to a struct drm_device.
1312  * @data: Pointer to data copied from / to user-space.
1313  * @file_priv: Pointer to a drm file private structure.
1314  */
1315 int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
1316                                    struct drm_file *file_priv)
1317 {
1318         union drm_vmw_gb_surface_reference_arg *arg =
1319             (union drm_vmw_gb_surface_reference_arg *)data;
1320         struct drm_vmw_surface_arg *req = &arg->req;
1321         struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
1322         struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
1323         int ret;
1324
1325         ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
1326
1327         if (unlikely(ret != 0))
1328                 return ret;
1329
1330         rep->creq = rep_ext.creq.base;
1331         rep->crep = rep_ext.crep;
1332
1333         return ret;
1334 }
1335
1336 /**
1337  * vmw_surface_gb_priv_define - Define a private GB surface
1338  *
1339  * @dev:  Pointer to a struct drm_device
1340  * @user_accounting_size:  Used to track user-space memory usage, set
1341  *                         to 0 for kernel mode only memory
1342  * @svga3d_flags: SVGA3d surface flags for the device
1343  * @format: requested surface format
1344  * @for_scanout: true if inteded to be used for scanout buffer
1345  * @num_mip_levels:  number of MIP levels
1346  * @multisample_count:
1347  * @array_size: Surface array size.
1348  * @size: width, heigh, depth of the surface requested
1349  * @multisample_pattern: Multisampling pattern when msaa is supported
1350  * @quality_level: Precision settings
1351  * @user_srf_out: allocated user_srf.  Set to NULL on failure.
1352  *
1353  * GB surfaces allocated by this function will not have a user mode handle, and
1354  * thus will only be visible to vmwgfx.  For optimization reasons the
1355  * surface may later be given a user mode handle by another function to make
1356  * it available to user mode drivers.
1357  */
1358 int vmw_surface_gb_priv_define(struct drm_device *dev,
1359                                uint32_t user_accounting_size,
1360                                SVGA3dSurfaceAllFlags svga3d_flags,
1361                                SVGA3dSurfaceFormat format,
1362                                bool for_scanout,
1363                                uint32_t num_mip_levels,
1364                                uint32_t multisample_count,
1365                                uint32_t array_size,
1366                                struct drm_vmw_size size,
1367                                SVGA3dMSPattern multisample_pattern,
1368                                SVGA3dMSQualityLevel quality_level,
1369                                struct vmw_surface **srf_out)
1370 {
1371         struct vmw_private *dev_priv = vmw_priv(dev);
1372         struct vmw_user_surface *user_srf;
1373         struct ttm_operation_ctx ctx = {
1374                 .interruptible = true,
1375                 .no_wait_gpu = false
1376         };
1377         struct vmw_surface *srf;
1378         int ret;
1379         u32 num_layers = 1;
1380         u32 sample_count = 1;
1381
1382         *srf_out = NULL;
1383
1384         if (for_scanout) {
1385                 if (!svga3dsurface_is_screen_target_format(format)) {
1386                         VMW_DEBUG_USER("Invalid Screen Target surface format.");
1387                         return -EINVAL;
1388                 }
1389
1390                 if (size.width > dev_priv->texture_max_width ||
1391                     size.height > dev_priv->texture_max_height) {
1392                         VMW_DEBUG_USER("%ux%u\n, exceeds max surface size %ux%u",
1393                                        size.width, size.height,
1394                                        dev_priv->texture_max_width,
1395                                        dev_priv->texture_max_height);
1396                         return -EINVAL;
1397                 }
1398         } else {
1399                 const struct svga3d_surface_desc *desc;
1400
1401                 desc = svga3dsurface_get_desc(format);
1402                 if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
1403                         VMW_DEBUG_USER("Invalid surface format.\n");
1404                         return -EINVAL;
1405                 }
1406         }
1407
1408         /* array_size must be null for non-GL3 host. */
1409         if (array_size > 0 && !dev_priv->has_dx) {
1410                 VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
1411                 return -EINVAL;
1412         }
1413
1414         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1415         if (unlikely(ret != 0))
1416                 return ret;
1417
1418         ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1419                                    user_accounting_size, &ctx);
1420         if (unlikely(ret != 0)) {
1421                 if (ret != -ERESTARTSYS)
1422                         DRM_ERROR("Out of graphics memory for surface"
1423                                   " creation.\n");
1424                 goto out_unlock;
1425         }
1426
1427         user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
1428         if (unlikely(!user_srf)) {
1429                 ret = -ENOMEM;
1430                 goto out_no_user_srf;
1431         }
1432
1433         *srf_out  = &user_srf->srf;
1434         user_srf->size = user_accounting_size;
1435         user_srf->prime.base.shareable = false;
1436         user_srf->prime.base.tfile     = NULL;
1437
1438         srf = &user_srf->srf;
1439         srf->flags             = svga3d_flags;
1440         srf->format            = format;
1441         srf->scanout           = for_scanout;
1442         srf->mip_levels[0]     = num_mip_levels;
1443         srf->num_sizes         = 1;
1444         srf->sizes             = NULL;
1445         srf->offsets           = NULL;
1446         srf->base_size         = size;
1447         srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
1448         srf->array_size        = array_size;
1449         srf->multisample_count = multisample_count;
1450         srf->multisample_pattern = multisample_pattern;
1451         srf->quality_level = quality_level;
1452
1453         if (array_size)
1454                 num_layers = array_size;
1455         else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
1456                 num_layers = SVGA3D_MAX_SURFACE_FACES;
1457
1458         if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
1459                 sample_count = srf->multisample_count;
1460
1461         srf->res.backup_size   =
1462                 svga3dsurface_get_serialized_size_extended(srf->format,
1463                                                            srf->base_size,
1464                                                            srf->mip_levels[0],
1465                                                            num_layers,
1466                                                            sample_count);
1467
1468         if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
1469                 srf->res.backup_size += sizeof(SVGA3dDXSOState);
1470
1471         /*
1472          * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
1473          * size greater than STDU max width/height. This is really a workaround
1474          * to support creation of big framebuffer requested by some user-space
1475          * for whole topology. That big framebuffer won't really be used for
1476          * binding with screen target as during prepare_fb a separate surface is
1477          * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
1478          */
1479         if (dev_priv->active_display_unit == vmw_du_screen_target &&
1480             for_scanout && size.width <= dev_priv->stdu_max_width &&
1481             size.height <= dev_priv->stdu_max_height)
1482                 srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
1483
1484         /*
1485          * From this point, the generic resource management functions
1486          * destroy the object on failure.
1487          */
1488         ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1489
1490         ttm_read_unlock(&dev_priv->reservation_sem);
1491         return ret;
1492
1493 out_no_user_srf:
1494         ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
1495
1496 out_unlock:
1497         ttm_read_unlock(&dev_priv->reservation_sem);
1498         return ret;
1499 }
1500
1501 /**
1502  * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
1503  * the user surface define functionality.
1504  *
1505  * @dev: Pointer to a struct drm_device.
1506  * @data: Pointer to data copied from / to user-space.
1507  * @file_priv: Pointer to a drm file private structure.
1508  */
1509 int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
1510                                 struct drm_file *file_priv)
1511 {
1512         union drm_vmw_gb_surface_create_ext_arg *arg =
1513             (union drm_vmw_gb_surface_create_ext_arg *)data;
1514         struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
1515         struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
1516
1517         return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
1518 }
1519
1520 /**
1521  * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
1522  * the user surface reference functionality.
1523  *
1524  * @dev: Pointer to a struct drm_device.
1525  * @data: Pointer to data copied from / to user-space.
1526  * @file_priv: Pointer to a drm file private structure.
1527  */
1528 int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
1529                                    struct drm_file *file_priv)
1530 {
1531         union drm_vmw_gb_surface_reference_ext_arg *arg =
1532             (union drm_vmw_gb_surface_reference_ext_arg *)data;
1533         struct drm_vmw_surface_arg *req = &arg->req;
1534         struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
1535
1536         return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
1537 }
1538
1539 /**
1540  * vmw_gb_surface_define_internal - Ioctl function implementing
1541  * the user surface define functionality.
1542  *
1543  * @dev: Pointer to a struct drm_device.
1544  * @req: Request argument from user-space.
1545  * @rep: Response argument to user-space.
1546  * @file_priv: Pointer to a drm file private structure.
1547  */
1548 static int
1549 vmw_gb_surface_define_internal(struct drm_device *dev,
1550                                struct drm_vmw_gb_surface_create_ext_req *req,
1551                                struct drm_vmw_gb_surface_create_rep *rep,
1552                                struct drm_file *file_priv)
1553 {
1554         struct vmw_private *dev_priv = vmw_priv(dev);
1555         struct vmw_user_surface *user_srf;
1556         struct vmw_surface *srf;
1557         struct vmw_resource *res;
1558         struct vmw_resource *tmp;
1559         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1560         int ret;
1561         uint32_t size;
1562         uint32_t backup_handle = 0;
1563         SVGA3dSurfaceAllFlags svga3d_flags_64 =
1564                 SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
1565                                 req->base.svga3d_flags);
1566
1567         if (!dev_priv->has_sm4_1) {
1568                 /*
1569                  * If SM4_1 is not support then cannot send 64-bit flag to
1570                  * device.
1571                  */
1572                 if (req->svga3d_flags_upper_32_bits != 0)
1573                         return -EINVAL;
1574
1575                 if (req->base.multisample_count != 0)
1576                         return -EINVAL;
1577
1578                 if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
1579                         return -EINVAL;
1580
1581                 if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
1582                         return -EINVAL;
1583         }
1584
1585         if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
1586             req->base.multisample_count == 0)
1587                 return -EINVAL;
1588
1589         if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
1590                 return -EINVAL;
1591
1592         if (unlikely(vmw_user_surface_size == 0))
1593                 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1594                         VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
1595
1596         size = vmw_user_surface_size;
1597
1598         /* Define a surface based on the parameters. */
1599         ret = vmw_surface_gb_priv_define(dev,
1600                                          size,
1601                                          svga3d_flags_64,
1602                                          req->base.format,
1603                                          req->base.drm_surface_flags &
1604                                          drm_vmw_surface_flag_scanout,
1605                                          req->base.mip_levels,
1606                                          req->base.multisample_count,
1607                                          req->base.array_size,
1608                                          req->base.base_size,
1609                                          req->multisample_pattern,
1610                                          req->quality_level,
1611                                          &srf);
1612         if (unlikely(ret != 0))
1613                 return ret;
1614
1615         user_srf = container_of(srf, struct vmw_user_surface, srf);
1616         if (drm_is_primary_client(file_priv))
1617                 user_srf->master = drm_master_get(file_priv->master);
1618
1619         ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1620         if (unlikely(ret != 0))
1621                 return ret;
1622
1623         res = &user_srf->srf.res;
1624
1625         if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
1626                 ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
1627                                          &res->backup,
1628                                          &user_srf->backup_base);
1629                 if (ret == 0) {
1630                         if (res->backup->base.num_pages * PAGE_SIZE <
1631                             res->backup_size) {
1632                                 VMW_DEBUG_USER("Surface backup buffer too small.\n");
1633                                 vmw_bo_unreference(&res->backup);
1634                                 ret = -EINVAL;
1635                                 goto out_unlock;
1636                         } else {
1637                                 backup_handle = req->base.buffer_handle;
1638                         }
1639                 }
1640         } else if (req->base.drm_surface_flags &
1641                    drm_vmw_surface_flag_create_buffer)
1642                 ret = vmw_user_bo_alloc(dev_priv, tfile,
1643                                         res->backup_size,
1644                                         req->base.drm_surface_flags &
1645                                         drm_vmw_surface_flag_shareable,
1646                                         &backup_handle,
1647                                         &res->backup,
1648                                         &user_srf->backup_base);
1649
1650         if (unlikely(ret != 0)) {
1651                 vmw_resource_unreference(&res);
1652                 goto out_unlock;
1653         }
1654
1655         tmp = vmw_resource_reference(res);
1656         ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
1657                                     req->base.drm_surface_flags &
1658                                     drm_vmw_surface_flag_shareable,
1659                                     VMW_RES_SURFACE,
1660                                     &vmw_user_surface_base_release, NULL);
1661
1662         if (unlikely(ret != 0)) {
1663                 vmw_resource_unreference(&tmp);
1664                 vmw_resource_unreference(&res);
1665                 goto out_unlock;
1666         }
1667
1668         rep->handle      = user_srf->prime.base.handle;
1669         rep->backup_size = res->backup_size;
1670         if (res->backup) {
1671                 rep->buffer_map_handle =
1672                         drm_vma_node_offset_addr(&res->backup->base.vma_node);
1673                 rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
1674                 rep->buffer_handle = backup_handle;
1675         } else {
1676                 rep->buffer_map_handle = 0;
1677                 rep->buffer_size = 0;
1678                 rep->buffer_handle = SVGA3D_INVALID_ID;
1679         }
1680
1681         vmw_resource_unreference(&res);
1682
1683 out_unlock:
1684         ttm_read_unlock(&dev_priv->reservation_sem);
1685         return ret;
1686 }
1687
1688 /**
1689  * vmw_gb_surface_reference_internal - Ioctl function implementing
1690  * the user surface reference functionality.
1691  *
1692  * @dev: Pointer to a struct drm_device.
1693  * @req: Pointer to user-space request surface arg.
1694  * @rep: Pointer to response to user-space.
1695  * @file_priv: Pointer to a drm file private structure.
1696  */
1697 static int
1698 vmw_gb_surface_reference_internal(struct drm_device *dev,
1699                                   struct drm_vmw_surface_arg *req,
1700                                   struct drm_vmw_gb_surface_ref_ext_rep *rep,
1701                                   struct drm_file *file_priv)
1702 {
1703         struct vmw_private *dev_priv = vmw_priv(dev);
1704         struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1705         struct vmw_surface *srf;
1706         struct vmw_user_surface *user_srf;
1707         struct ttm_base_object *base;
1708         uint32_t backup_handle;
1709         int ret = -EINVAL;
1710
1711         ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
1712                                            req->handle_type, &base);
1713         if (unlikely(ret != 0))
1714                 return ret;
1715
1716         user_srf = container_of(base, struct vmw_user_surface, prime.base);
1717         srf = &user_srf->srf;
1718         if (!srf->res.backup) {
1719                 DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
1720                 goto out_bad_resource;
1721         }
1722
1723         mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
1724         ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
1725         mutex_unlock(&dev_priv->cmdbuf_mutex);
1726
1727         if (unlikely(ret != 0)) {
1728                 DRM_ERROR("Could not add a reference to a GB surface "
1729                           "backup buffer.\n");
1730                 (void) ttm_ref_object_base_unref(tfile, base->handle,
1731                                                  TTM_REF_USAGE);
1732                 goto out_bad_resource;
1733         }
1734
1735         rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
1736         rep->creq.base.format = srf->format;
1737         rep->creq.base.mip_levels = srf->mip_levels[0];
1738         rep->creq.base.drm_surface_flags = 0;
1739         rep->creq.base.multisample_count = srf->multisample_count;
1740         rep->creq.base.autogen_filter = srf->autogen_filter;
1741         rep->creq.base.array_size = srf->array_size;
1742         rep->creq.base.buffer_handle = backup_handle;
1743         rep->creq.base.base_size = srf->base_size;
1744         rep->crep.handle = user_srf->prime.base.handle;
1745         rep->crep.backup_size = srf->res.backup_size;
1746         rep->crep.buffer_handle = backup_handle;
1747         rep->crep.buffer_map_handle =
1748                 drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
1749         rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
1750
1751         rep->creq.version = drm_vmw_gb_surface_v1;
1752         rep->creq.svga3d_flags_upper_32_bits =
1753                 SVGA3D_FLAGS_UPPER_32(srf->flags);
1754         rep->creq.multisample_pattern = srf->multisample_pattern;
1755         rep->creq.quality_level = srf->quality_level;
1756         rep->creq.must_be_zero = 0;
1757
1758 out_bad_resource:
1759         ttm_base_object_unref(&base);
1760
1761         return ret;
1762 }