// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/dmapool.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
        (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))

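/*
 * A worked example of the size above (an editor's sketch, assuming
 * sizeof(SVGACBHeader) <= 64): ALIGN(sizeof(SVGACBHeader), 64) == 64,
 * so VMW_CMDBUF_INLINE_SIZE == 1024 - 64 == 960 bytes, and a struct
 * vmw_cmdbuf_dheader (defined below) then occupies exactly 1024 bytes,
 * i.e. four dheaders per 4096-byte page.
 */
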
/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware
 * @block_submission: Whether to block submission of command buffers from
 * this context to hardware.
 */
struct vmw_cmdbuf_context {
        struct list_head submitted;
        struct list_head hw_submitted;
        struct list_head preempted;
        unsigned num_hw_submitted;
        bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
        struct mutex cur_mutex;
        struct mutex space_mutex;
        struct mutex error_mutex;
        struct work_struct work;
        struct vmw_private *dev_priv;
        struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
        struct list_head error;
        struct drm_mm mm;
        struct ttm_buffer_object *cmd_space;
        struct ttm_bo_kmap_obj map_obj;
        u8 *map;
        struct vmw_cmdbuf_header *cur;
        size_t cur_pos;
        size_t default_size;
        unsigned max_hw_submitted;
        spinlock_t lock;
        struct dma_pool *headers;
        struct dma_pool *dheaders;
        wait_queue_head_t alloc_queue;
        wait_queue_head_t idle_queue;
        bool irq_on;
        bool using_mob;
        bool has_pool;
        dma_addr_t handle;
        size_t size;
        u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
        struct vmw_cmdbuf_man *man;
        SVGACBHeader *cb_header;
        SVGACBContext cb_context;
        struct list_head list;
        struct drm_mm_node node;
        dma_addr_t handle;
        u8 *cmd;
        size_t size;
        size_t reserved;
        bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
        SVGACBHeader cb_header;
        u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
        size_t page_size;
        struct drm_mm_node *node;
        bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx)                             \
        for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
             ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The range manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
        if (interruptible) {
                if (mutex_lock_interruptible(&man->cur_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->cur_mutex);
        }

        return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The range manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
        mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * May be called without holding any lock.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_dheader *dheader;

        if (WARN_ON_ONCE(!header->inline_space))
                return;

        dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
                               cb_header);
        dma_pool_free(header->man->dheaders, dheader, header->handle);
        kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        lockdep_assert_held_once(&man->lock);

        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }

        drm_mm_remove_node(&header->node);
        wake_up_all(&man->alloc_queue);
        if (header->cb_header)
                dma_pool_free(man->headers, header->cb_header,
                              header->handle);
        kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        /* Avoid locking if inline_space */
        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }
        spin_lock(&man->lock);
        __vmw_cmdbuf_header_free(header);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;
        u32 val;

        val = upper_32_bits(header->handle);
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

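        /*
         * Editor's note (an assumption about the device interface): the
         * write to SVGA_REG_COMMAND_LOW below is what hands the buffer
         * to the device, which is why the high half of the DMA handle
         * must be written first.
         */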
        val = lower_32_bits(header->handle);
        val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

        return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
        INIT_LIST_HEAD(&ctx->hw_submitted);
        INIT_LIST_HEAD(&ctx->submitted);
        INIT_LIST_HEAD(&ctx->preempted);
        ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
                                  struct vmw_cmdbuf_context *ctx)
{
        while (ctx->num_hw_submitted < man->max_hw_submitted &&
               !list_empty(&ctx->submitted) &&
               !ctx->block_submission) {
                struct vmw_cmdbuf_header *entry;
                SVGACBStatus status;

                entry = list_first_entry(&ctx->submitted,
                                         struct vmw_cmdbuf_header,
                                         list);

                status = vmw_cmdbuf_header_submit(entry);

                /* This should never happen */
                if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        break;
                }

                list_del(&entry->list);
                list_add_tail(&entry->list, &ctx->hw_submitted);
                ctx->num_hw_submitted++;
        }
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented by one if this context's submitted list is still
 * non-empty after processing.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers, typically freeing them, but taking appropriate action on
 * preemption or error. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_context *ctx,
                                   int *notempty)
{
        struct vmw_cmdbuf_header *entry, *next;

        vmw_cmdbuf_ctx_submit(man, ctx);

        list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
                SVGACBStatus status = entry->cb_header->status;

                if (status == SVGA_CB_STATUS_NONE)
                        break;

                list_del(&entry->list);
                wake_up_all(&man->idle_queue);
                ctx->num_hw_submitted--;
                switch (status) {
                case SVGA_CB_STATUS_COMPLETED:
                        __vmw_cmdbuf_header_free(entry);
                        break;
                case SVGA_CB_STATUS_COMMAND_ERROR:
                        WARN_ONCE(true, "Command buffer error.\n");
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &man->error);
                        schedule_work(&man->work);
                        break;
                case SVGA_CB_STATUS_PREEMPTED:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &ctx->preempted);
                        break;
                case SVGA_CB_STATUS_CB_HEADER_ERROR:
                        WARN_ONCE(true, "Command buffer header error.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                default:
                        WARN_ONCE(true, "Undefined command buffer status.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                }
        }

        vmw_cmdbuf_ctx_submit(man, ctx);
        if (!list_empty(&ctx->submitted))
                (*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
        int notempty;
        struct vmw_cmdbuf_context *ctx;
        int i;

retry:
        notempty = 0;
        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_process(man, ctx, &notempty);

        if (man->irq_on && !notempty) {
                vmw_generic_waiter_remove(man->dev_priv,
                                          SVGA_IRQFLAG_COMMAND_BUFFER,
                                          &man->dev_priv->cmdbuf_waiters);
                man->irq_on = false;
        } else if (!man->irq_on && notempty) {
                vmw_generic_waiter_add(man->dev_priv,
                                       SVGA_IRQFLAG_COMMAND_BUFFER,
                                       &man->dev_priv->cmdbuf_waiters);
                man->irq_on = true;

                /* Rerun in case we just missed an irq. */
                goto retry;
        }
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
                               struct vmw_cmdbuf_header *header,
                               SVGACBContext cb_context)
{
        if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
                header->cb_header->dxContext = 0;
        header->cb_context = cb_context;
        list_add_tail(&header->list, &man->ctx[cb_context].submitted);

        vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
        struct vmw_cmdbuf_man *man =
                container_of(work, struct vmw_cmdbuf_man, work);
        struct vmw_cmdbuf_header *entry, *next;
        uint32_t dummy;
        bool send_fence = false;
        struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
        int i;
        struct vmw_cmdbuf_context *ctx;
        bool global_block = false;

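        /*
         * Editor's summary of the recovery sequence below: take each
         * erroneous buffer off the error list and, where possible,
         * rewind it to restart at the command following the one that
         * failed; block and preempt all contexts; splice the preempted
         * buffers in after the restarted ones; then resubmit everything
         * and restart the contexts.
         */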
        for_each_cmdbuf_ctx(man, i, ctx)
                INIT_LIST_HEAD(&restart_head[i]);

        mutex_lock(&man->error_mutex);
        spin_lock(&man->lock);
        list_for_each_entry_safe(entry, next, &man->error, list) {
                SVGACBHeader *cb_hdr = entry->cb_header;
                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
                        (entry->cmd + cb_hdr->errorOffset);
                u32 error_cmd_size, new_start_offset;
                const char *cmd_name;

                list_del_init(&entry->list);
                global_block = true;

                if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
                        VMW_DEBUG_USER("Unknown command causing device error.\n");
                        VMW_DEBUG_USER("Command buffer offset is %lu\n",
                                       (unsigned long) cb_hdr->errorOffset);
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
                               cmd_name);
                VMW_DEBUG_USER("Command buffer offset is %lu\n",
                               (unsigned long) cb_hdr->errorOffset);
                VMW_DEBUG_USER("Command size is %lu\n",
                               (unsigned long) error_cmd_size);

                new_start_offset = cb_hdr->errorOffset + error_cmd_size;

                if (new_start_offset >= cb_hdr->length) {
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                if (man->using_mob)
                        cb_hdr->ptr.mob.mobOffset += new_start_offset;
                else
                        cb_hdr->ptr.pa += (u64) new_start_offset;

                entry->cmd += new_start_offset;
                cb_hdr->length -= new_start_offset;
                cb_hdr->errorOffset = 0;
                cb_hdr->offset = 0;

                list_add_tail(&entry->list, &restart_head[entry->cb_context]);
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                man->ctx[i].block_submission = true;

        spin_unlock(&man->lock);

        /* Preempt all contexts */
        if (global_block && vmw_cmdbuf_preempt(man, 0))
                DRM_ERROR("Failed preempting command buffer contexts\n");

        spin_lock(&man->lock);
        for_each_cmdbuf_ctx(man, i, ctx) {
                /* Move preempted command buffers to the preempted queue. */
                vmw_cmdbuf_ctx_process(man, ctx, &dummy);

                /*
                 * Add the preempted queue after the command buffer
                 * that caused an error.
                 */
                list_splice_init(&ctx->preempted, restart_head[i].prev);

                /*
                 * Finally add all command buffers first in the submitted
                 * queue, to rerun them.
                 */
                ctx->block_submission = false;
                list_splice_init(&restart_head[i], &ctx->submitted);
        }

        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);

        if (global_block && vmw_cmdbuf_startstop(man, 0, true))
                DRM_ERROR("Failed restarting command buffer contexts\n");

        /* Send a new fence in case one was removed */
        if (send_fence) {
                vmw_cmd_send_fence(man->dev_priv, &dummy);
                wake_up_all(&man->idle_queue);
        }

        mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
                                bool check_preempted)
{
        struct vmw_cmdbuf_context *ctx;
        bool idle = false;
        int i;

        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        for_each_cmdbuf_ctx(man, i, ctx) {
                if (!list_empty(&ctx->submitted) ||
                    !list_empty(&ctx->hw_submitted) ||
                    (check_preempted && !list_empty(&ctx->preempted)))
                        goto out_unlock;
        }

        idle = list_empty(&man->error);

out_unlock:
        spin_unlock(&man->lock);

        return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        lockdep_assert_held_once(&man->cur_mutex);

        if (!cur)
                return;

        spin_lock(&man->lock);
        if (man->cur_pos == 0) {
                __vmw_cmdbuf_header_free(cur);
                goto out_unlock;
        }

        man->cur->cb_header->length = man->cur_pos;
        vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
        spin_unlock(&man->lock);
        man->cur = NULL;
        man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the
 * cur_mutex.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                         bool interruptible)
{
        int ret = vmw_cmdbuf_cur_lock(man, interruptible);

        if (ret)
                return ret;

        __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);

        return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
                    unsigned long timeout)
{
        int ret;

        ret = vmw_cmdbuf_cur_flush(man, interruptible);
        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                ret = wait_event_interruptible_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        } else {
                ret = wait_event_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);
        if (ret == 0) {
                if (!vmw_cmdbuf_man_idle(man, true))
                        ret = -EBUSY;
                else
                        ret = 0;
        }
        if (ret > 0)
                ret = 0;

        return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true on success,
 * in which case @info->done is set and @info->node holds the allocated node.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_alloc_info *info)
{
        int ret;

        if (info->done)
                return true;

        memset(info->node, 0, sizeof(*info->node));
        spin_lock(&man->lock);
        ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        if (ret) {
                vmw_cmdbuf_man_process(man);
                ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        }

        spin_unlock(&man->lock);
        info->done = !ret;

        return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
                                  struct drm_mm_node *node,
                                  size_t size,
                                  bool interruptible)
{
        struct vmw_cmdbuf_alloc_info info;

        info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
        info.node = node;
        info.done = false;

        /*
         * To prevent starvation of large requests, only one allocating call
         * at a time waiting for space.
         */
        if (interruptible) {
                if (mutex_lock_interruptible(&man->space_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->space_mutex);
        }

        /* Try to allocate space without waiting. */
        if (vmw_cmdbuf_try_alloc(man, &info))
                goto out_unlock;

        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                int ret;

                ret = wait_event_interruptible
                        (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
                if (ret) {
                        vmw_generic_waiter_remove
                                (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
                                 &man->dev_priv->cmdbuf_waiters);
                        mutex_unlock(&man->space_mutex);
                        return ret;
                }
        } else {
                wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);

out_unlock:
        mutex_unlock(&man->space_mutex);

        return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_header *header,
                                 size_t size,
                                 bool interruptible)
{
        SVGACBHeader *cb_hdr;
        size_t offset;
        int ret;

        if (!man->has_pool)
                return -ENOMEM;

        ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
        if (ret)
                return ret;

        header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
                                            &header->handle);
        if (!header->cb_header) {
                ret = -ENOMEM;
                goto out_no_cb_header;
        }

        header->size = header->node.size << PAGE_SHIFT;
        cb_hdr = header->cb_header;
        offset = header->node.start << PAGE_SHIFT;
        header->cmd = man->map + offset;
        if (man->using_mob) {
                cb_hdr->flags = SVGA_CB_FLAG_MOB;
                cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
                cb_hdr->ptr.mob.mobOffset = offset;
        } else {
                cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
        }

        return 0;

out_no_cb_header:
        spin_lock(&man->lock);
        drm_mm_remove_node(&header->node);
        spin_unlock(&man->lock);

        return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_header *header,
                                   int size)
{
        struct vmw_cmdbuf_dheader *dheader;
        SVGACBHeader *cb_hdr;

        if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
                return -ENOMEM;

        dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
                                  &header->handle);
        if (!dheader)
                return -ENOMEM;

        header->inline_space = true;
        header->size = VMW_CMDBUF_INLINE_SIZE;
        cb_hdr = &dheader->cb_header;
        header->cb_header = cb_hdr;
        header->cmd = dheader->cmd;
        cb_hdr->status = SVGA_CB_STATUS_NONE;
        cb_hdr->flags = SVGA_CB_FLAG_NONE;
        cb_hdr->ptr.pa = (u64)header->handle +
                (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

        return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                       size_t size, bool interruptible,
                       struct vmw_cmdbuf_header **p_header)
{
        struct vmw_cmdbuf_header *header;
        int ret = 0;

        *p_header = NULL;

        header = kzalloc(sizeof(*header), GFP_KERNEL);
        if (!header)
                return ERR_PTR(-ENOMEM);

        if (size <= VMW_CMDBUF_INLINE_SIZE)
                ret = vmw_cmdbuf_space_inline(man, header, size);
        else
                ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

        if (ret) {
                kfree(header);
                return ERR_PTR(ret);
        }

        header->man = man;
        INIT_LIST_HEAD(&header->list);
        header->cb_header->status = SVGA_CB_STATUS_NONE;
        *p_header = header;

        return header->cmd;
}

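/*
 * Example use of the explicit-header path (an editor's illustrative
 * sketch, not code from this driver; my_cmd and my_size are
 * hypothetical):
 *
 *      struct vmw_cmdbuf_header *header;
 *      void *cmd = vmw_cmdbuf_alloc(man, my_size, true, &header);
 *
 *      if (IS_ERR(cmd))
 *              return PTR_ERR(cmd);
 *      cmd = vmw_cmdbuf_reserve(man, my_size, SVGA3D_INVALID_ID, true,
 *                               header);
 *      memcpy(cmd, my_cmd, my_size);
 *      vmw_cmdbuf_commit(man, my_size, header, true);
 */
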
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
                                    size_t size,
                                    int ctx_id,
                                    bool interruptible)
{
        struct vmw_cmdbuf_header *cur;
        void *ret;

        if (vmw_cmdbuf_cur_lock(man, interruptible))
                return ERR_PTR(-ERESTARTSYS);

        cur = man->cur;
        if (cur && (size + man->cur_pos > cur->size ||
                    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
                     ctx_id != cur->cb_header->dxContext)))
                __vmw_cmdbuf_cur_flush(man);

        if (!man->cur) {
                ret = vmw_cmdbuf_alloc(man,
                                       max_t(size_t, size, man->default_size),
                                       interruptible, &man->cur);
                if (IS_ERR(ret)) {
                        vmw_cmdbuf_cur_unlock(man);
                        return ret;
                }

                cur = man->cur;
        }

        if (ctx_id != SVGA3D_INVALID_ID) {
                cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                cur->cb_header->dxContext = ctx_id;
        }

        cur->reserved = size;

        return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
                                  size_t size, bool flush)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        lockdep_assert_held_once(&man->cur_mutex);

        WARN_ON(size > cur->reserved);
        man->cur_pos += size;
        if (!size)
                cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                         int ctx_id, bool interruptible,
                         struct vmw_cmdbuf_header *header)
{
        if (!header)
                return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

        if (size > header->size)
                return ERR_PTR(-EINVAL);

        if (ctx_id != SVGA3D_INVALID_ID) {
                header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                header->cb_header->dxContext = ctx_id;
        }

        header->reserved = size;
        return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                       struct vmw_cmdbuf_header *header, bool flush)
{
        if (!header) {
                vmw_cmdbuf_commit_cur(man, size, flush);
                return;
        }

        (void) vmw_cmdbuf_cur_lock(man, false);
        __vmw_cmdbuf_cur_flush(man);
        WARN_ON(size > header->reserved);
        man->cur = header;
        man->cur_pos = size;
        if (!size)
                header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

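/*
 * Example use of the current-buffer path (an editor's illustrative
 * sketch; my_cmd and my_size are hypothetical). Passing a NULL header
 * reserves space in the manager-owned "current" buffer, which batches
 * small kernel submissions:
 *
 *      void *cmd = vmw_cmdbuf_reserve(man, my_size, SVGA3D_INVALID_ID,
 *                                     true, NULL);
 *
 *      if (IS_ERR(cmd))
 *              return PTR_ERR(cmd);
 *      memcpy(cmd, my_cmd, my_size);
 *      vmw_cmdbuf_commit(man, my_size, NULL, false);
 */
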
/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
                                          const void *command,
                                          size_t size)
{
        struct vmw_cmdbuf_header *header;
        int status;
        void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        memcpy(cmd, command, size);
        header->cb_header->length = size;
        header->cb_context = SVGA_CB_CONTEXT_DEVICE;
        spin_lock(&man->lock);
        status = vmw_cmdbuf_header_submit(header);
        spin_unlock(&man->lock);
        vmw_cmdbuf_header_free(header);

        if (status != SVGA_CB_STATUS_COMPLETED) {
                DRM_ERROR("Device context command failed with status %d\n",
                          status);
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to pass command through.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
        struct {
                uint32 id;
                SVGADCCmdPreempt body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_PREEMPT;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;
        cmd.body.ignoreIDZero = 0;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start/stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable)
{
        struct {
                uint32 id;
                SVGADCCmdStartStop body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
        cmd.body.enable = (enable) ? 1 : 0;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 *
 * Set the size and allocate the main command buffer space pool.
 * If successful, this enables large command submissions.
 * Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
        struct vmw_private *dev_priv = man->dev_priv;
        bool dummy;
        int ret;

        if (man->has_pool)
                return -EINVAL;

        /* First, try to allocate a huge chunk of DMA memory */
        size = PAGE_ALIGN(size);
        man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
                                      &man->handle, GFP_KERNEL);
        if (man->map) {
                man->using_mob = false;
        } else {
                /*
                 * DMA memory failed. If we can have command buffers in a
                 * MOB, try to use that instead. Note that this will
                 * actually call into the already enabled manager, when
                 * binding the MOB.
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
                    !dev_priv->has_mob)
                        return -ENOMEM;

                ret = vmw_bo_create_kernel(dev_priv, size,
                                           &vmw_mob_placement,
                                           &man->cmd_space);
                if (ret)
                        return ret;

                man->using_mob = true;
                ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
                                  &man->map_obj);
                if (ret)
                        goto out_no_map;

                man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
        }

        man->size = size;
        drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

        man->has_pool = true;

        /*
         * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
         * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
         * needs to wait for space and we block on further command
         * submissions to be able to free up space.
         */
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        DRM_INFO("Using command buffers with %s pool.\n",
                 (man->using_mob) ? "MOB" : "DMA");

        return 0;

out_no_map:
        if (man->using_mob) {
                ttm_bo_put(man->cmd_space);
                man->cmd_space = NULL;
        }

        return ret;
}

/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
        struct vmw_cmdbuf_man *man;
        struct vmw_cmdbuf_context *ctx;
        unsigned int i;
        int ret;

        if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
                return ERR_PTR(-ENOSYS);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return ERR_PTR(-ENOMEM);

        man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
                2 : 1;
        man->headers = dma_pool_create("vmwgfx cmdbuf",
                                       dev_priv->drm.dev,
                                       sizeof(SVGACBHeader),
                                       64, PAGE_SIZE);
        if (!man->headers) {
                ret = -ENOMEM;
                goto out_no_pool;
        }

        man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
                                        dev_priv->drm.dev,
                                        sizeof(struct vmw_cmdbuf_dheader),
                                        64, PAGE_SIZE);
        if (!man->dheaders) {
                ret = -ENOMEM;
                goto out_no_dpool;
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_init(ctx);

        INIT_LIST_HEAD(&man->error);
        spin_lock_init(&man->lock);
        mutex_init(&man->cur_mutex);
        mutex_init(&man->space_mutex);
        mutex_init(&man->error_mutex);
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        init_waitqueue_head(&man->alloc_queue);
        init_waitqueue_head(&man->idle_queue);
        man->dev_priv = dev_priv;
        man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
        INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
                               &dev_priv->error_waiters);
        ret = vmw_cmdbuf_startstop(man, 0, true);
        if (ret) {
                DRM_ERROR("Failed starting command buffer contexts\n");
                vmw_cmdbuf_man_destroy(man);
                return ERR_PTR(ret);
        }

        return man;

out_no_dpool:
        dma_pool_destroy(man->headers);
out_no_pool:
        kfree(man);

        return ERR_PTR(ret);
}

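/*
 * Editor's note on the expected manager lifecycle, inferred from the
 * kerneldoc and WARN_ONs in this file (a sketch of the call order, not
 * a verbatim sequence from the driver):
 *
 *      man = vmw_cmdbuf_man_create(dev_priv);
 *      ret = vmw_cmdbuf_set_pool_size(man, size);  // enables large submissions
 *      ...
 *      vmw_cmdbuf_remove_pool(man);                // before MOB teardown
 *      vmw_cmdbuf_man_destroy(man);
 */
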
/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
        if (!man->has_pool)
                return;

        man->has_pool = false;
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);
        if (man->using_mob) {
                (void) ttm_bo_kunmap(&man->map_obj);
                ttm_bo_put(man->cmd_space);
                man->cmd_space = NULL;
        } else {
                dma_free_coherent(man->dev_priv->drm.dev,
                                  man->size, man->map, man->handle);
        }
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
        WARN_ON_ONCE(man->has_pool);
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);

        if (vmw_cmdbuf_startstop(man, 0, false))
                DRM_ERROR("Failed stopping command buffer contexts.\n");

        vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
                                  &man->dev_priv->error_waiters);
        (void) cancel_work_sync(&man->work);
        dma_pool_destroy(man->dheaders);
        dma_pool_destroy(man->headers);
        mutex_destroy(&man->cur_mutex);
        mutex_destroy(&man->space_mutex);
        mutex_destroy(&man->error_mutex);
        kfree(man);
}