// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12
/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
		__sw_context->dx_ctx_node;                                    \
	});                                                                   \
})
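/*
 * Usage sketch (illustrative, mirroring how the DX verifiers below use the
 * macro): a verifier typically starts with
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 *
 * so that DX commands submitted without a DX context are rejected with a
 * debug message instead of dereferencing a NULL context node.
 */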
#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
	struct {                                                              \
		SVGA3dCmdHeader header;                                       \
		__type body;                                                  \
	} __var
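/*
 * Expansion sketch (illustrative): VMW_DECLARE_CMD_VAR(*cmd,
 * SVGA3dCmdSurfaceCopy) declares a pointer to an anonymous struct laid out
 * exactly like the command in the stream, i.e.
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdSurfaceCopy body;
 *	} *cmd;
 *
 * which is why the verifiers below can recover it from a header pointer
 * with container_of(header, typeof(*cmd), header).
 */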
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};
/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};
/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};
/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};
/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}
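/*
 * Usage sketch (illustrative; the actual dispatch table appears further down
 * in this file): entries are declared as, e.g.,
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * indexing the array by SVGA command id and recording the verifier callback,
 * the permission flags and a printable command name.
 */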
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
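/*
 * Usage sketch (illustrative, matching the call sites below): the byte
 * offset of a resource id within the submitted command stream is recorded as
 *
 *	vmw_ptr_diff(sw_context->buf_start, id_loc)
 *
 * and later used by vmw_resource_relocations_apply() to patch the id in
 * place once the real device id is known.
 */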
/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}
/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}
/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}
/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the data structures
 * associated with the binding manager.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}
/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}
/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);

	return 0;
}
/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}
/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}
/**
 * vmw_view_id_val_add - Look up a view and add it, together with the surface
 * it points to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}
/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}
/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}
/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}
/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
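/*
 * Flow sketch (illustrative): a verifier records where an id lives in the
 * stream, e.g.
 *
 *	vmw_resource_relocation_add(sw_context, res,
 *				    vmw_ptr_diff(sw_context->buf_start, id_loc),
 *				    vmw_res_rel_normal);
 *
 * and, after validation, vmw_resource_relocations_apply() walks the list and
 * overwrites each recorded location with the now-known device id (or with
 * the NOP command id for the NOP relocation types).
 */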
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}
/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (unlikely(ret != 0))
		return ret;

	if (p_res)
		*p_res = res;

	return 0;
}
/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}
/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}
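/*
 * Usage sketch (illustrative; the DX verifiers later in this file call this
 * helper in essentially this way): binding a range of shader resource views
 * looks like
 *
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr,
 *				    cmd->body.type - SVGA3D_SHADERTYPE_MIN,
 *				    (void *) &cmd[1], num_sr_view,
 *				    cmd->body.startView);
 *
 * where the view id array immediately follows the command body in the
 * stream.
 */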
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}
/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found.
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}
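/*
 * Ordering sketch (illustrative summary of the two helpers above):
 *
 *	vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	... validate and submit the command batch ...
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	... emit a fence; both the old and new query buffers signal on it ...
 *
 * Only after that fence signals is the previously pinned query buffer free
 * to move, which is what makes the asynchronous unpin above safe.
 */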
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
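/*
 * Usage sketch (illustrative; see vmw_cmd_dx_bind_query() below for a real
 * caller): a verifier that carries a MOB id passes a pointer into the
 * command body,
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
 *				    &vmw_bo);
 *
 * and the recorded reloc->mob_loc is patched with the kernel-side MOB id
 * when the relocations are applied.
 */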
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
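/*
 * Usage sketch (illustrative; see vmw_cmd_end_query() below for a real
 * caller): commands that carry an SVGAGuestPtr hand the embedded pointer to
 *
 *	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 *				      &cmd->body.guestResult, &vmw_bo);
 *
 * so the GMR id/offset pair can be rewritten once the buffer is validated.
 */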
/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}
/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (ret)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}
/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}
/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
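/*
 * Stream layout sketch (illustrative) for the suffix check above: a surface
 * DMA command is followed by a variable number of copy boxes and terminated
 * by a suffix, i.e.
 *
 *	SVGA3dCmdHeader | SVGA3dCmdSurfaceDMA | SVGA3dCopyBox[n] |
 *	SVGA3dCmdSurfaceDMASuffix
 *
 * which is why the suffix is located header->size - sizeof(*suffix) bytes
 * past the start of the body.
 */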
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}
/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_validation_res_switch_backup with a different interface.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}
/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}
/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}
/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}
/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}
/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}
/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}
/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}
/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= max_shader_num ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}
/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;

	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= max_allowed) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}
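/*
 * Editor's note: variable-length commands like the one above carry a
 * trailing array whose element count is never encoded explicitly - it is
 * derived from the header size. A user-space model of that derivation,
 * including the u64 widening used above so that a hostile startView plus
 * count cannot wrap a 32-bit sum past the limit check. Names are
 * hypothetical; the sketch is kept out of the build.
 */
#if 0	/* illustrative sketch only - not built */
#include <stdint.h>

static int trailing_range_ok(uint32_t header_size, uint32_t body_size,
			     uint32_t elem_size, uint32_t start,
			     uint32_t max, uint32_t *count)
{
	*count = (header_size - body_size) / elem_size;
	/* Widen before adding: 32-bit overflow would defeat the check. */
	return (uint64_t)start + (uint64_t)*count <= (uint64_t)max;
}
#endif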
/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= max_allowed ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
						    VMW_RES_DIRTY_NONE);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
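/*
 * Editor's note: each validated vertex buffer is recorded as a staged
 * context binding rather than being applied immediately; the binding
 * tracker can then re-emit state when a context is swapped back in. A
 * minimal model of slot-keyed staging with hypothetical types, kept out
 * of the build:
 */
#if 0	/* illustrative sketch only - not built */
#include <stdint.h>

struct vb_binding_model { uint32_t sid, offset, stride; };

/* Stage a binding for a slot; the last write per slot wins. */
static void stage_vb(struct vb_binding_model *staged, uint32_t slot,
		     struct vb_binding_model b)
{
	staged[slot] = b;
}
#endif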
/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}
/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}
/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}
/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}
/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}
/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	return ctx_node ? 0 : -EINVAL;
}
/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}
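/*
 * Editor's note: unlike the unconditional NOP patching used for the
 * legacy shader commands, a view destroy uses vmw_res_rel_cond_nop - per
 * the relocation-type documentation earlier in this file, the command is
 * turned into a NOP only when the resource id resolves to -1 after
 * validation. A model of that decision, with hypothetical names, kept
 * out of the build:
 */
#if 0	/* illustrative sketch only - not built */
#include <stdint.h>

static void apply_cond_nop(uint32_t *cmd_id, int32_t resolved_id,
			   uint32_t nop_id)
{
	if (resolved_id == -1)
		*cmd_id = nop_id;	/* otherwise leave the command intact */
}
#endif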
/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}
/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}
/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}
/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *view;
	struct vmw_res_cache_entry *rcache;

	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);

	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is...
	 * So mark the last looked-up surface, which is the surface
	 * the view points to, gpu-dirty.
	 */
	rcache = &sw_context->res_cache[vmw_res_surface];
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);

	return 0;
}
/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}
/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}
static int vmw_cmd_sm5(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return 0;
}

static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
}

static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
}
static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewUint body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewFloat body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}
static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > SVGA3D_MAX_UAVIEWS) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
				  cmd->body.uavSpliceIndex);

	return 0;
}

static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > SVGA3D_MAX_UAVIEWS) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
				  cmd->body.startIndex);

	return 0;
}
static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
					  struct vmw_sw_context *sw_context,
					  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineStreamOutputWithMob body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	ret = vmw_cotable_notify(res, cmd->body.soid);
	if (ret)
		return ret;

	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
				       cmd->body.soid,
				       &sw_context->staged_cmd_res);
}
static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * command is not available to user-space. Simply return in this case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * With an SM5-capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
					  &sw_context->staged_cmd_res);
}
static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find streamoutput to bind.\n");
		return PTR_ERR(res);
	}

	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}
static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct vmw_ctx_bindinfo_so binding;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	if (cmd->body.soid == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * command is not available to user-space. Simply return in this case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * With an SM5-capable device, if the lookup fails then user-space
	 * probably used the old streamoutput define command. Return without
	 * an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_so;
	binding.slot = 0; /* Only one SO set to context at a time. */

	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
			binding.slot);

	return 0;
}
static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct vmw_draw_indexed_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_draw_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_dispatch_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDispatchIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
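/*
 * Editor's note: the verifier splits the stream on command id - ids below
 * SVGA_CMD_MAX are legacy 2D FIFO commands handled by the function above,
 * and everything else is a 3D command carrying an SVGA3dCmdHeader (see
 * vmw_cmd_check() below). A user-space model of that split, with a
 * stand-in constant, kept out of the build:
 */
#if 0	/* illustrative sketch only - not built */
#include <stdint.h>

#define MODEL_2D_CMD_MAX 64	/* stand-in for SVGA_CMD_MAX */

static int is_3d_command(const void *buf)
{
	/* Every command starts with a 32-bit id word. */
	return ((const uint32_t *)buf)[0] >= MODEL_2D_CMD_MAX;
}
#endif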
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
		    &vmw_cmd_dx_set_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),

	/*
	 * SM5 commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
		    &vmw_cmd_clear_uav_float, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
		    true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
		    &vmw_cmd_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
		    &vmw_cmd_dispatch_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
		    &vmw_cmd_sm5_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
		    &vmw_cmd_dx_define_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
};
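/*
 * Editor's note: dispatch through the table above is a direct array
 * index - the 3D command id space is contiguous from SVGA_3D_CMD_BASE,
 * so the verifier subtracts the base and bounds-checks against
 * SVGA_3D_CMD_MAX (see vmw_cmd_check() below). A model of that O(1)
 * lookup, with hypothetical types, kept out of the build:
 */
#if 0	/* illustrative sketch only - not built */
#include <stdint.h>
#include <stddef.h>

struct entry_model { int (*func)(void); };

static const struct entry_model *lookup(const struct entry_model *table,
					uint32_t id, uint32_t base,
					uint32_t max)
{
	if (id < base || id >= max)
		return NULL;
	return &table[id - base];	/* ids map 1:1 onto table slots */
}
#endif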
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		return false;
	}

	return true;
}
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
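/*
 * Editor's note: a buffer-object relocation records where in the batch a
 * guest address or mob id must be written once the buffer's final
 * placement is known. A user-space model of the VRAM case above, where
 * the device-visible offset becomes the buffer's page offset within the
 * framebuffer GMR. Names and the page-shift constant are stand-ins; the
 * sketch is kept out of the build:
 */
#if 0	/* illustrative sketch only - not built */
#include <stdint.h>

#define MODEL_PAGE_SHIFT 12

struct guest_ptr_model { uint32_t gmr_id, offset; };

static void apply_vram_reloc(struct guest_ptr_model *loc,
			     uint64_t first_page, uint32_t fb_gmr_id)
{
	loc->offset += first_page << MODEL_PAGE_SHIFT;
	loc->gmr_id = fb_gmr_id;
}
#endif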
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
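/*
 * Editor's note: the bounce buffer grows geometrically (size += size/2,
 * page aligned), so repeated submissions of ever-larger batches cost an
 * amortized-constant number of reallocations. A model of the size
 * computation alone, with stand-in constants, kept out of the build:
 */
#if 0	/* illustrative sketch only - not built */
#include <stdint.h>

#define MODEL_PAGE_SIZE 4096ULL
#define MODEL_ALIGN(x) (((x) + MODEL_PAGE_SIZE - 1) & ~(MODEL_PAGE_SIZE - 1))

static uint64_t next_bounce_size(uint64_t cur, uint64_t needed)
{
	if (cur == 0)
		cur = MODEL_PAGE_SIZE;	/* models VMWGFX_CMD_BOUNCE_INIT_SIZE */
	while (cur < needed)
		cur = MODEL_ALIGN(cur + (cur >> 1));	/* grow by 1.5x */
	return cur;
}
#endif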
/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}
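/*
 * Editor's note: the fallback wait above makes fence creation effectively
 * infallible from the caller's point of view - if the fence object cannot
 * be created, the driver synchronously waits for the sequence to pass, so
 * a NULL fence is safe to use afterwards. A model of that contract, with
 * hypothetical callbacks, kept out of the build:
 */
#if 0	/* illustrative sketch only - not built */
#include <stdint.h>

/* Returns a fence handle, or 0 after forcing completion. */
static uint64_t fence_or_sync(uint64_t (*create)(void), void (*sync)(void))
{
	uint64_t f = create();

	if (!f)
		sync();		/* now "no fence" means "already idle" */
	return f;
}
#endif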
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd, struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference the
	 * handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
					  TTM_REF_USAGE);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}
3878 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3880 * @dev_priv: Pointer to a device private structure.
3881 * @kernel_commands: Pointer to the unpatched command batch.
3882 * @command_size: Size of the unpatched command batch.
3883 * @sw_context: Structure holding the relocation lists.
3885 * Side effects: If this function returns 0, then the command batch pointed to
3886 * by @kernel_commands will have been modified.
3888 static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3889 void *kernel_commands, u32 command_size,
3890 struct vmw_sw_context *sw_context)
3894 if (sw_context->dx_ctx_node)
3895 cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3896 sw_context->dx_ctx_node->ctx->id);
3898 cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3903 vmw_apply_relocations(sw_context);
3904 memcpy(cmd, kernel_commands, command_size);
3905 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3906 vmw_resource_relocations_free(&sw_context->res_relocations);
3907 vmw_cmd_commit(dev_priv, command_size);
3909 return 0;
3910 }
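/*
 * Note (a reading of the code above, not authoritative): the ordering matters.
 * vmw_apply_relocations() patches buffer-object addresses in the kernel copy
 * of the batch before it is memcpy'd into the reserved FIFO space, whereas the
 * resource-id fixups are applied afterwards, directly to the FIFO copy, since
 * they are recorded as offsets from the start of the submitted batch.
 */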
3912 /**
3913 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3914 * command buffer manager.
3916 * @dev_priv: Pointer to a device private structure.
3917 * @header: Opaque handle to the command buffer allocation.
3918 * @command_size: Size of the unpatched command batch.
3919 * @sw_context: Structure holding the relocation lists.
3921 * Side effects: If this function returns 0, then the command buffer represented
3922 * by @header will have been modified.
3924 static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3925 struct vmw_cmdbuf_header *header,
3926 u32 command_size,
3927 struct vmw_sw_context *sw_context)
3928 {
3929 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3930 SVGA3D_INVALID_ID);
3931 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3932 header);
3934 vmw_apply_relocations(sw_context);
3935 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3936 vmw_resource_relocations_free(&sw_context->res_relocations);
3937 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3939 return 0;
3940 }
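/*
 * Note (a reading of the code above, not authoritative): unlike the FIFO path,
 * there is no memcpy here. The user commands were already copied into the
 * command buffer by vmw_execbuf_cmdbuf(), so both relocation passes patch that
 * buffer in place before it is committed to the command buffer manager.
 */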
3942 /**
3943 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3944 * submission using a command buffer.
3946 * @dev_priv: Pointer to a device private structure.
3947 * @user_commands: User-space pointer to the commands to be submitted.
3948 * @command_size: Size of the unpatched command batch.
3949 * @header: Out parameter returning the opaque pointer to the command buffer.
3951 * This function checks whether we can use the command buffer manager for
3952 * submission and if so, creates a command buffer of suitable size and copies
3953 * the user data into that buffer.
3955 * On successful return, the function returns a pointer to the data in the
3956 * command buffer and *@header is set to non-NULL.
3958 * @kernel_commands: If the command buffer manager cannot be used, the
3959 * function returns the value of @kernel_commands passed in. That value may
3960 * be NULL, in which case *@header is also set to NULL.
3962 * If an error is encountered, the function will return a pointer error value.
3963 * If the function is interrupted by a signal while sleeping, it will return
3964 * -ERESTARTSYS cast to a pointer error value.
3965 */
3966 static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3967 void __user *user_commands,
3968 void *kernel_commands, u32 command_size,
3969 struct vmw_cmdbuf_header **header)
3970 {
3971 size_t cmdbuf_size;
3972 int ret;
3974 *header = NULL;
3975 if (command_size > SVGA_CB_MAX_SIZE) {
3976 VMW_DEBUG_USER("Command buffer is too large.\n");
3977 return ERR_PTR(-EINVAL);
3978 }
3980 if (!dev_priv->cman || kernel_commands)
3981 return kernel_commands;
3983 /* If possible, add a little space for fencing. */
3984 cmdbuf_size = command_size + 512;
3985 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3986 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3987 header);
3988 if (IS_ERR(kernel_commands))
3989 return kernel_commands;
3991 ret = copy_from_user(kernel_commands, user_commands, command_size);
3992 if (ret) {
3993 VMW_DEBUG_USER("Failed copying commands.\n");
3994 vmw_cmdbuf_header_free(*header);
3995 *header = NULL;
3996 return ERR_PTR(-EFAULT);
3997 }
3999 return kernel_commands;
4000 }
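/*
 * Usage sketch (illustrative only) of the tri-state contract callers must
 * honor, mirroring vmw_execbuf_process() below:
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		return PTR_ERR(kernel_commands);	// hard error
 *	// header != NULL: commands already live in a command buffer
 *	// header == NULL: fall back to the bounce-buffer/FIFO path
 */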
4002 static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4003 struct vmw_sw_context *sw_context,
4004 uint32_t handle)
4005 {
4006 struct vmw_resource *res;
4007 unsigned int size;
4008 int ret;
4010 if (handle == SVGA3D_INVALID_ID)
4011 return 0;
4013 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4014 ret = vmw_validation_preload_res(sw_context->ctx, size);
4015 if (ret)
4016 return ret;
4018 res = vmw_user_resource_noref_lookup_handle
4019 (dev_priv, sw_context->fp->tfile, handle,
4020 user_context_converter);
4021 if (IS_ERR(res)) {
4022 VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4023 (unsigned int) handle);
4024 return PTR_ERR(res);
4025 }
4027 ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
4028 if (unlikely(ret != 0))
4029 return ret;
4031 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4032 sw_context->man = vmw_context_res_man(res);
4034 return 0;
4035 }
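/*
 * Note (a reading of the code above, not authoritative): passing
 * handle == SVGA3D_INVALID_ID is how non-DX batches are submitted. The early
 * return leaves sw_context->dx_ctx_node NULL, which the DX command verifiers
 * later use to reject any DX command that requires a bound context.
 */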
4037 int vmw_execbuf_process(struct drm_file *file_priv,
4038 struct vmw_private *dev_priv,
4039 void __user *user_commands, void *kernel_commands,
4040 uint32_t command_size, uint64_t throttle_us,
4041 uint32_t dx_context_handle,
4042 struct drm_vmw_fence_rep __user *user_fence_rep,
4043 struct vmw_fence_obj **out_fence, uint32_t flags)
4044 {
4045 struct vmw_sw_context *sw_context = &dev_priv->ctx;
4046 struct vmw_fence_obj *fence = NULL;
4047 struct vmw_cmdbuf_header *header;
4048 uint32_t handle = 0;
4049 int ret;
4050 int32_t out_fence_fd = -1;
4051 struct sync_file *sync_file = NULL;
4052 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
4054 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
4056 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4057 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4058 if (out_fence_fd < 0) {
4059 VMW_DEBUG_USER("Failed to get a fence fd.\n");
4060 return out_fence_fd;
4061 }
4062 }
4064 if (throttle_us) {
4065 VMW_DEBUG_USER("Throttling is no longer supported.\n");
4066 }
4068 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4069 kernel_commands, command_size,
4070 &header);
4071 if (IS_ERR(kernel_commands)) {
4072 ret = PTR_ERR(kernel_commands);
4073 goto out_free_fence_fd;
4074 }
4076 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4077 if (ret) {
4078 ret = -ERESTARTSYS;
4079 goto out_free_header;
4080 }
4082 sw_context->kernel = false;
4083 if (kernel_commands == NULL) {
4084 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4085 if (unlikely(ret != 0))
4086 goto out_unlock;
4088 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4089 command_size);
4090 if (unlikely(ret != 0)) {
4091 ret = -EFAULT;
4092 VMW_DEBUG_USER("Failed copying commands.\n");
4093 goto out_unlock;
4094 }
4096 kernel_commands = sw_context->cmd_bounce;
4097 } else if (!header) {
4098 sw_context->kernel = true;
4099 }
4101 sw_context->fp = vmw_fpriv(file_priv);
4102 INIT_LIST_HEAD(&sw_context->ctx_list);
4103 sw_context->cur_query_bo = dev_priv->pinned_bo;
4104 sw_context->last_query_ctx = NULL;
4105 sw_context->needs_post_query_barrier = false;
4106 sw_context->dx_ctx_node = NULL;
4107 sw_context->dx_query_mob = NULL;
4108 sw_context->dx_query_ctx = NULL;
4109 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4110 INIT_LIST_HEAD(&sw_context->res_relocations);
4111 INIT_LIST_HEAD(&sw_context->bo_relocations);
4113 if (sw_context->staged_bindings)
4114 vmw_binding_state_reset(sw_context->staged_bindings);
4116 if (!sw_context->res_ht_initialized) {
4117 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4118 if (unlikely(ret != 0))
4119 goto out_unlock;
4121 sw_context->res_ht_initialized = true;
4122 }
4124 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4125 sw_context->ctx = &val_ctx;
4126 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4127 if (unlikely(ret != 0))
4128 goto out_err_nores;
4130 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4131 command_size);
4132 if (unlikely(ret != 0))
4133 goto out_err_nores;
4135 ret = vmw_resources_reserve(sw_context);
4136 if (unlikely(ret != 0))
4137 goto out_err_nores;
4139 ret = vmw_validation_bo_reserve(&val_ctx, true);
4140 if (unlikely(ret != 0))
4141 goto out_err_nores;
4143 ret = vmw_validation_bo_validate(&val_ctx, true);
4144 if (unlikely(ret != 0))
4145 goto out_err;
4147 ret = vmw_validation_res_validate(&val_ctx, true);
4148 if (unlikely(ret != 0))
4149 goto out_err;
4151 vmw_validation_drop_ht(&val_ctx);
4153 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4154 if (unlikely(ret != 0)) {
4155 ret = -ERESTARTSYS;
4156 goto out_err;
4157 }
4159 if (dev_priv->has_mob) {
4160 ret = vmw_rebind_contexts(sw_context);
4161 if (unlikely(ret != 0))
4162 goto out_unlock_binding;
4163 }
4165 if (!header) {
4166 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4167 command_size, sw_context);
4168 } else {
4169 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4170 sw_context);
4171 header = NULL;
4172 }
4173 mutex_unlock(&dev_priv->binding_mutex);
4174 if (ret)
4175 goto out_err;
4177 vmw_query_bo_switch_commit(dev_priv, sw_context);
4178 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4179 (user_fence_rep) ? &handle : NULL);
4180 /*
4181 * This error is harmless, because if fence submission fails,
4182 * vmw_fifo_send_fence will sync. The error will be propagated to
4183 * user-space in @fence_rep.
4184 */
4185 if (ret != 0)
4186 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4188 vmw_execbuf_bindings_commit(sw_context, false);
4189 vmw_bind_dx_query_mob(sw_context);
4190 vmw_validation_res_unreserve(&val_ctx, false);
4192 vmw_validation_bo_fence(sw_context->ctx, fence);
4194 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4195 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4197 /*
4198 * If anything fails here, give up trying to export the fence and do a
4199 * sync since the user mode will not be able to sync the fence itself.
4200 * This ensures we are still functionally correct.
4201 */
4202 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4204 sync_file = sync_file_create(&fence->base);
4205 if (!sync_file) {
4206 VMW_DEBUG_USER("Sync file create failed for fence\n");
4207 put_unused_fd(out_fence_fd);
4208 out_fence_fd = -1;
4210 (void) vmw_fence_obj_wait(fence, false, false,
4211 VMW_FENCE_WAIT_TIMEOUT);
4212 } else {
4213 /* Link the fence with the FD created earlier */
4214 fd_install(out_fence_fd, sync_file->file);
4215 }
4216 }
4218 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4219 user_fence_rep, fence, handle, out_fence_fd,
4220 sync_file);
4222 /* Don't unreference when handing fence out */
4223 if (unlikely(out_fence != NULL)) {
4224 *out_fence = fence;
4225 fence = NULL;
4226 } else if (likely(fence != NULL)) {
4227 vmw_fence_obj_unreference(&fence);
4228 }
4230 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4231 mutex_unlock(&dev_priv->cmdbuf_mutex);
4233 /*
4234 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4235 * in resource destruction paths.
4236 */
4237 vmw_validation_unref_lists(&val_ctx);
4239 return 0;
4241 out_unlock_binding:
4242 mutex_unlock(&dev_priv->binding_mutex);
4243 out_err:
4244 vmw_validation_bo_backoff(&val_ctx);
4245 out_err_nores:
4246 vmw_execbuf_bindings_commit(sw_context, true);
4247 vmw_validation_res_unreserve(&val_ctx, true);
4248 vmw_resource_relocations_free(&sw_context->res_relocations);
4249 vmw_free_relocations(sw_context);
4250 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4251 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4252 out_unlock:
4253 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4254 vmw_validation_drop_ht(&val_ctx);
4255 WARN_ON(!list_empty(&sw_context->ctx_list));
4256 mutex_unlock(&dev_priv->cmdbuf_mutex);
4258 /*
4259 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4260 * in resource destruction paths.
4261 */
4262 vmw_validation_unref_lists(&val_ctx);
4263 out_free_header:
4264 if (header)
4265 vmw_cmdbuf_header_free(header);
4266 out_free_fence_fd:
4267 if (out_fence_fd >= 0)
4268 put_unused_fd(out_fence_fd);
4270 return ret;
4271 }
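/*
 * User-space sketch (illustrative only; assumes libdrm's drmCommandWriteRead()
 * and the uapi in vmwgfx_drm.h) of the fence-fd export path handled above:
 *
 *	arg.flags |= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
 *	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	// rep.fd is now a sync-file fd that can be polled or handed to
 *	// another driver; user-space owns it and must close() it.
 */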
4273 /**
4274 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4276 * @dev_priv: The device private structure.
4278 * This function is called to idle the fifo and unpin the query buffer if the
4279 * normal way to do this hits an error, which should typically be extremely
4280 * rare.
4281 */
4282 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4283 {
4284 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4286 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4287 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4288 if (dev_priv->dummy_query_bo_pinned) {
4289 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4290 dev_priv->dummy_query_bo_pinned = false;
4291 }
4292 }
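/*
 * Note (a reading of the code above, not authoritative): this is a
 * last-resort recovery path. The result of the 10*HZ fallback wait is
 * deliberately ignored because there is no better action left to take
 * before force-unpinning the buffers.
 */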
4295 /**
4296 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4297 * bo.
4299 * @dev_priv: The device private structure.
4300 * @fence: If non-NULL, it should point to a struct vmw_fence_obj issued _after_ a
4301 * query barrier that flushes all queries touching the current buffer pointed to
4302 * by @dev_priv->pinned_bo
4304 * This function should be used to unpin the pinned query bo, or as a query
4305 * barrier when we need to make sure that all queries have finished before the
4306 * next fifo command. (For example on hardware context destructions where the
4307 * hardware may otherwise leak unfinished queries).
4309 * This function does not return any failure codes, but makes an attempt to do
4310 * safe unpinning in case of errors.
4312 * The function will synchronize on the previous query barrier, and will thus
4313 * not finish until that barrier has executed.
4315 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4316 * calling this function.
4317 */
4318 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4319 struct vmw_fence_obj *fence)
4320 {
4321 int ret = 0;
4322 struct vmw_fence_obj *lfence = NULL;
4323 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4325 if (dev_priv->pinned_bo == NULL)
4326 goto out_unlock;
4328 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
4329 false);
4330 if (ret)
4331 goto out_no_reserve;
4333 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
4334 false);
4335 if (ret)
4336 goto out_no_reserve;
4338 ret = vmw_validation_bo_reserve(&val_ctx, false);
4339 if (ret)
4340 goto out_no_reserve;
4342 if (dev_priv->query_cid_valid) {
4343 BUG_ON(fence != NULL);
4344 ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4345 if (ret)
4346 goto out_no_emit;
4347 dev_priv->query_cid_valid = false;
4348 }
4350 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4351 if (dev_priv->dummy_query_bo_pinned) {
4352 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4353 dev_priv->dummy_query_bo_pinned = false;
4354 }
4355 if (fence == NULL) {
4356 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4357 NULL);
4358 fence = lfence;
4359 }
4360 vmw_validation_bo_fence(&val_ctx, fence);
4361 if (lfence != NULL)
4362 vmw_fence_obj_unreference(&lfence);
4364 vmw_validation_unref_lists(&val_ctx);
4365 vmw_bo_unreference(&dev_priv->pinned_bo);
4367 out_unlock:
4368 return;
4369 out_no_emit:
4370 vmw_validation_bo_backoff(&val_ctx);
4371 out_no_reserve:
4372 vmw_validation_unref_lists(&val_ctx);
4373 vmw_execbuf_unpin_panic(dev_priv);
4374 vmw_bo_unreference(&dev_priv->pinned_bo);
4375 }
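/*
 * Usage sketch (illustrative only). As the locking comment above requires, a
 * caller using the function as a query barrier does:
 *
 *	mutex_lock(&dev_priv->cmdbuf_mutex);
 *	__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 *	mutex_unlock(&dev_priv->cmdbuf_mutex);
 *
 * which is essentially what vmw_execbuf_release_pinned_bo() below wraps.
 */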
4377 /**
4378 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4380 * @dev_priv: The device private structure.
4382 * This function should be used to unpin the pinned query bo, or as a query
4383 * barrier when we need to make sure that all queries have finished before the
4384 * next fifo command. (For example on hardware context destructions where the
4385 * hardware may otherwise leak unfinished queries).
4387 * This function does not return any failure codes, but makes an attempt to do
4388 * safe unpinning in case of errors.
4390 * The function will synchronize on the previous query barrier, and will thus
4391 * not finish until that barrier has executed.
4392 */
4393 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4394 {
4395 mutex_lock(&dev_priv->cmdbuf_mutex);
4396 if (dev_priv->query_cid_valid)
4397 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4398 mutex_unlock(&dev_priv->cmdbuf_mutex);
4399 }
4401 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4402 struct drm_file *file_priv)
4403 {
4404 struct vmw_private *dev_priv = vmw_priv(dev);
4405 struct drm_vmw_execbuf_arg *arg = data;
4406 int ret;
4407 struct dma_fence *in_fence = NULL;
4409 /*
4410 * Extend the ioctl argument while maintaining backwards compatibility:
4411 * We take different code paths depending on the value of arg->version.
4413 * Note: The ioctl argument is extended and zero-padded by core DRM.
4414 */
4415 if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4416 arg->version == 0)) {
4417 VMW_DEBUG_USER("Incorrect execbuf version.\n");
4421 switch (arg->version) {
4423 /* For v1 core DRM have extended + zeropadded the data */
4424 arg->context_handle = (uint32_t) -1;
4428 /* For v2 and later, core DRM will have copied it correctly */
4429 break;
4430 }
4432 /* If imported a fence FD from elsewhere, then wait on it */
4433 if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4434 in_fence = sync_file_get_fence(arg->imported_fence_fd);
4436 if (!in_fence) {
4437 VMW_DEBUG_USER("Cannot get imported fence\n");
4438 return -EINVAL;
4439 }
4441 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4442 if (ret)
4443 goto out;
4444 }
4446 ret = vmw_execbuf_process(file_priv, dev_priv,
4447 (void __user *)(unsigned long)arg->commands,
4448 NULL, arg->command_size, arg->throttle_us,
4449 arg->context_handle,
4450 (void __user *)(unsigned long)arg->fence_rep,
4451 NULL, arg->flags);
4453 if (unlikely(ret != 0))
4454 goto out;
4456 vmw_kms_cursor_post_execbuf(dev_priv);
4458 out:
4459 if (in_fence)
4460 dma_fence_put(in_fence);
4461 return ret;
4462 }
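/*
 * User-space sketch (illustrative only; assumes libdrm's drmCommandWriteRead()
 * and the uapi in vmwgfx_drm.h) of importing a foreign fence fd so the batch
 * waits for it before execution, as handled above:
 *
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.flags = DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
 *	arg.imported_fence_fd = foreign_fd;
 *	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */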