drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"

struct vmw_temp_set_context {
	SVGA3dCmdHeader header;
	SVGA3dCmdDXTempSetContext body;
};

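/**
 * vmw_fifo_have_3d - Check whether the device supports 3D.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * For guest-backed devices, queries SVGA3D_DEVCAP_3D through the device
 * capability registers. For legacy devices, checks the 3D hardware version
 * advertised in the FIFO.
 *
 * Returns true if 3D is supported, false otherwise.
 */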
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_mmio_read(fifo_mem +
				  ((fifo->capabilities &
				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				   SVGA_FIFO_3D_HWVERSION_REVISED :
				   SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}

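/**
 * vmw_fifo_have_pitchlock - Check whether the device supports pitchlock.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns true if the FIFO advertises SVGA_FIFO_CAP_PITCHLOCK,
 * false otherwise.
 */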
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

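/**
 * vmw_fifo_init - Initialize the FIFO and enable the SVGA device.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the FIFO state to initialize.
 *
 * Saves the register state to be restored on release, enables the
 * device, sets up the FIFO bounds and control words and allocates the
 * static bounce buffer.
 *
 * Returns 0 on success, -ENOMEM if the bounce buffer could not be
 * allocated.
 */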
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

	fifo->dx = false;
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);
	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
}

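/**
 * vmw_fifo_ping_host - Wake up the device command processor.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: Reason code written to the SVGA_REG_SYNC register.
 *
 * Writes SVGA_REG_SYNC only if the FIFO busy flag was previously
 * clear, avoiding redundant register writes.
 */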
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	preempt_disable();
	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	preempt_enable();
}

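/**
 * vmw_fifo_release - Tear down the FIFO and restore device state.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the FIFO state to tear down.
 *
 * Syncs the device, busy-waits for it to go idle, records the last
 * read seqno, restores the register state saved at init time and
 * frees the bounce buffers.
 */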
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

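/**
 * vmw_fifo_is_full - Check whether @bytes of FIFO space is unavailable.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of space to check for.
 *
 * Returns true if the circular command FIFO has less than @bytes of
 * free space.
 */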
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

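/**
 * vmw_fifo_wait_noirq - Wait for FIFO space by polling.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Fallback used when the device lacks SVGA_CAP_IRQMASK. Polls for
 * space, rescheduling between checks.
 *
 * Returns 0 on success, -EBUSY on timeout, -ERESTARTSYS if
 * interrupted by a signal.
 */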
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

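/**
 * vmw_fifo_wait - Wait for @bytes of space to become available in the FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host to signal a full FIFO, then waits on FIFO progress
 * interrupts if the device supports them, falling back to polling
 * otherwise.
 *
 * Returns 0 on success, -EBUSY on timeout, -ERESTARTSYS if
 * interrupted by a signal.
 */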
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}

/**
 * vmw_local_fifo_reserve - Reserve @bytes of space in the fifo.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 *
 * This function returns NULL (error) on two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than
 * the available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_mmio_write(bytes, fifo_mem +
						       SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}

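/**
 * vmw_fifo_reserve_dx - Reserve @bytes of command space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: DX context id, or SVGA3D_INVALID_ID for none.
 *
 * Reserves space through the command buffer manager if one is
 * present, otherwise directly in the FIFO. Reserving for a specific
 * DX context requires a command buffer manager.
 *
 * Returns a pointer to the reserved space, or NULL on failure.
 */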
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman)
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	else if (ctx_id == SVGA3D_INVALID_ID)
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	else {
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	}
	if (IS_ERR_OR_NULL(ret))
		return NULL;

	return ret;
}

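/**
 * vmw_fifo_res_copy - Copy a bounce buffer into reserved FIFO space.
 *
 * @fifo_state: Pointer to the FIFO state.
 * @fifo_mem: Pointer to the mapped FIFO memory.
 * @next_cmd: Offset at which to start copying.
 * @max: FIFO maximum offset.
 * @min: FIFO minimum offset.
 * @bytes: Number of bytes to copy.
 *
 * Used on devices with SVGA_FIFO_CAP_RESERVE. Marks @bytes as
 * reserved, then copies the bounce buffer in at most two chunks,
 * wrapping around at the FIFO end.
 */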
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

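/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the FIFO word by word.
 *
 * @fifo_state: Pointer to the FIFO state.
 * @fifo_mem: Pointer to the mapped FIFO memory.
 * @next_cmd: Offset at which to start copying.
 * @max: FIFO maximum offset.
 * @min: FIFO minimum offset.
 * @bytes: Number of bytes to copy.
 *
 * Used on devices without SVGA_FIFO_CAP_RESERVE. Copies one 32-bit
 * word at a time, advancing SVGA_FIFO_NEXT_CMD after each word so
 * the device can consume data as it is written.
 */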
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

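/**
 * vmw_local_fifo_commit - Commit @bytes of previously reserved FIFO space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Copies any bounce buffer contents into the FIFO, advances
 * SVGA_FIFO_NEXT_CMD past the committed commands, clears the
 * reservation and pings the host to start command processing.
 */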
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	if (fifo_state->dx)
		bytes += sizeof(struct vmw_temp_set_context);

	fifo_state->dx = false;
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

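/**
 * vmw_fifo_commit - Commit command space reserved earlier.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Commits through the command buffer manager if one is present,
 * otherwise directly to the FIFO.
 */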
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman)
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
	else
		vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command
 * processing starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptible if function needs to sleep.
 *
 * Returns 0 on success, or an error code from the command buffer flush.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	if (dev_priv->cman)
		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
	else
		return 0;
}

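/**
 * vmw_fifo_send_fence - Emit a fence command to the FIFO.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Assigned the sequence number of the emitted fence.
 *
 * If the device lacks SVGA_FIFO_CAP_FENCE, no fence command is
 * emitted; the waiting code in vmwgfx_irq.c emulates it instead.
 * On failure to reserve command space, a fallback wait is performed
 * so the returned seqno does not go unsignaled.
 *
 * Returns 0 on success, -ENOMEM on failure to reserve command space.
 */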
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_FIFO_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3 * HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_fifo_commit_flush(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using the
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}