// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mem_encrypt.h>

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \
		 union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
		 struct drm_vmw_msg_arg)
/*
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
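/*
 * Illustrative expansion: VMW_IOCTL_DEF(VMW_GET_PARAM, ...) places its
 * entry at index DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE,
 * so the vmw_ioctls[] table below is indexed by the driver-private ioctl
 * number (0, 1, 2, ...) rather than by the full ioctl command code.
 */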
static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_MSG,
		      vmw_msg_ioctl,
		      DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
	{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
	{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
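/*
 * The parameters above are exposed with mode 0600 under
 * /sys/module/vmwgfx/parameters/ and can also be set at load time,
 * e.g. (illustrative): modprobe vmwgfx force_coherent=1
 */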
static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
	if (capabilities2 & SVGA_CAP2_DX3)
		DRM_INFO("  DX3.\n");
}
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_placement, false, true,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}
static int vmw_device_init(struct vmw_private *dev_priv)
{
	bool uses_fb_traces = false;

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
			 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;

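	/*
	 * With traces enabled the device tracks framebuffer dirtying
	 * itself, which is presumably why they are only turned on when
	 * command submission is not available.
	 */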
	vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
	dev_priv->fifo = vmw_fifo_create(dev_priv);
	if (IS_ERR(dev_priv->fifo)) {
		int err = PTR_ERR(dev_priv->fifo);
		dev_priv->fifo = NULL;
		return err;
	} else if (!dev_priv->fifo) {
		vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	}

	dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	return 0;
}
static void vmw_device_fini(struct vmw_private *vmw)
{
	/*
	 * Legacy sync
	 */
	vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
		;

	vmw->last_read_seqno = vmw_fence_read(vmw);

	vmw_write(vmw, SVGA_REG_CONFIG_DONE,
		  vmw->config_done_state);
	vmw_write(vmw, SVGA_REG_ENABLE,
		  vmw->enable_state);
	vmw_write(vmw, SVGA_REG_TRACES,
		  vmw->traces_state);

	vmw_fifo_destroy(vmw);
}
/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}
/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}
/*
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/* TTM currently doesn't fully support SEV encryption. */
	if (mem_encrypt_active())
		return -EINVAL;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}
/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
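		/*
		 * 44 bits matches a 32-bit page frame number plus the
		 * 12-bit in-page offset (assuming 4 KiB pages), i.e. the
		 * widest address a legacy 32-bit PFN interface can express.
		 */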
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ret = vmw_thp_init(dev_priv);
#else
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
#endif
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}
static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	vmw_thp_fini(dev_priv);
#else
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
#endif
}
static int vmw_setup_pci_resources(struct vmw_private *dev,
				   unsigned long pci_id)
{
	resource_size_t rmmio_start;
	resource_size_t rmmio_size;
	resource_size_t fifo_start;
	resource_size_t fifo_size;
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev->drm.dev);

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, "vmwgfx probe");
	if (ret)
		return ret;

	dev->pci_id = pci_id;
	if (pci_id == VMWGFX_PCI_ID_SVGA3) {
		rmmio_start = pci_resource_start(pdev, 0);
		rmmio_size = pci_resource_len(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 2);
		dev->vram_size = pci_resource_len(pdev, 2);

		DRM_INFO("Register MMIO at 0x%pa size is %llu kiB\n",
			 &rmmio_start, (uint64_t)rmmio_size / 1024);
		dev->rmmio = devm_ioremap(dev->drm.dev,
					  rmmio_start,
					  rmmio_size);
		if (!dev->rmmio) {
			DRM_ERROR("Failed mapping registers mmio memory.\n");
			pci_release_regions(pdev);
			return -ENOMEM;
		}
	} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
		dev->io_start = pci_resource_start(pdev, 0);
		dev->vram_start = pci_resource_start(pdev, 1);
		dev->vram_size = pci_resource_len(pdev, 1);
		fifo_start = pci_resource_start(pdev, 2);
		fifo_size = pci_resource_len(pdev, 2);

		DRM_INFO("FIFO at %pa size is %llu kiB\n",
			 &fifo_start, (uint64_t)fifo_size / 1024);
		dev->fifo_mem = devm_memremap(dev->drm.dev,
					      fifo_start,
					      fifo_size,
					      MEMREMAP_WB);

		if (IS_ERR(dev->fifo_mem)) {
			DRM_ERROR("Failed mapping FIFO memory.\n");
			pci_release_regions(pdev);
			return PTR_ERR(dev->fifo_mem);
		}
	} else {
		pci_release_regions(pdev);
		return -EINVAL;
	}

	/*
	 * This is the approximate size of the vram; the exact size will only
	 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource
	 * size will be equal to or bigger than the size reported by
	 * SVGA_REG_VRAM_SIZE.
	 */
	DRM_INFO("VRAM at %pa size is %llu kiB\n",
		 &dev->vram_start, (uint64_t)dev->vram_size / 1024);

	return 0;
}
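/*
 * Version negotiation with the host: write the highest SVGA ID the driver
 * can speak and read the register back; if the device does not accept that
 * version, the read presumably returns a different (unsupported) value,
 * which the check below rejects.
 */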
static int vmw_detect_version(struct vmw_private *dev)
{
	uint32_t svga_id;

	vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
		  SVGA_ID_3 : SVGA_ID_2);
	svga_id = vmw_read(dev, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
		DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
			  svga_id, dev->vmw_chipset);
		return -ENOSYS;
	}
	BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
	DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff));
	return 0;
}
static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
{
	int ret;
	enum vmw_res_type i;
	bool refuse_dma = false;
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	dev_priv->vmw_chipset = pci_id;
	dev_priv->drm.dev_private = dev_priv;

	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->binding_mutex);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	ret = vmw_setup_pci_resources(dev_priv, pci_id);
	if (ret)
		return ret;
	ret = vmw_detect_version(dev_priv);
	if (ret)
		goto out_no_pci_or_version;

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init_base(&dev_priv->res_idr[i], 1);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}

	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			DRM_INFO("Disabling 3D acceleration.\n");
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size =
				vmw_read(dev_priv,
					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);
	DRM_INFO("Supports command queues = %d\n",
		 vmw_cmd_supported(dev_priv));

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

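	/*
	 * Raise the DMA segment-size limit from its default (typically
	 * 64 KiB when unset, an assumption here) so scatter-gather lists
	 * need not be split into small segments.
	 */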
	dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %llu kiB\n",
		 (uint64_t)dev_priv->prim_bb_mem / 1024);

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err0;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err0;
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(&dev_priv->drm, pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	drm_vma_offset_manager_init(&dev_priv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
			      dev_priv->drm.dev,
			      dev_priv->drm.anon_inode->i_mapping,
			      &dev_priv->vma_manager,
			      dev_priv->map_mode == vmw_dma_alloc_coherent,
			      false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */

	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

986 * "Guest Memory Regions" is an aperture like feature with
987 * one slot per bo. There is an upper limit of the number of
988 * slots as well as the bo size.
990 dev_priv->has_gmr = true;
991 /* TODO: This is most likely not correct */
992 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
994 vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
995 DRM_INFO("No GMR memory available. "
996 "Graphics memory resources are very limited.\n");
997 dev_priv->has_gmr = false;
1000 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
1001 dev_priv->has_mob = true;
1003 if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
1004 DRM_INFO("No MOB memory available. "
1005 "3D will be disabled.\n");
1006 dev_priv->has_mob = false;
	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4;
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);

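	/*
	 * Probe the shader-model ladder step by step: each level below is
	 * only tried once the previous one has been detected, so sm_type
	 * ends up at the highest level the device acknowledges.
	 */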
	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);

		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4_1;
	}

	if (has_sm4_1_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_5;
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_fifo;

	if (dev_priv->sm_type == VMW_SM_5)
		DRM_INFO("SM5 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4_1)
		DRM_INFO("SM4_1 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4)
		DRM_INFO("SM4 support available.\n");
	DRM_INFO("Running without reservation semaphore\n");

	vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
			VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
			VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
out_no_pci_or_version:
	pci_release_regions(pdev);
	return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	vmw_vram_manager_fini(dev_priv);
	ttm_device_fini(&dev_priv->bdev);
	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(&dev_priv->drm);

	ttm_object_device_release(&dev_priv->tdev);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

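		/*
		 * DRM_VMW_EXECBUF is forwarded without the exact-size
		 * encoding check below, presumably because it accepts
		 * argument structs of several sizes across interface
		 * versions.
		 */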
		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}
/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}
/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	__vmw_svga_enable(dev_priv);
}
/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}
/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	ttm_mem_global_release(&ttm_mem_glob);
	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
}
static unsigned long
vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
		      unsigned long len, unsigned long pgoff,
		      unsigned long flags)
{
	struct drm_file *file_priv = file->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &dev_priv->vma_manager);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
		}
		break;
	default:
		break;
	}
	return 0;
}
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}
static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * No user-space processes should be running now.
	 */
	ret = vmw_kms_suspend(&dev_priv->drm);
	if (ret) {
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
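	/* Swap out buffer objects one at a time until none are left. */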
	while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_detect_version(dev_priv);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	if (dev_priv->suspend_state)
		vmw_kms_resume(&dev_priv->drm);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};
static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = drm_poll,
	.read = drm_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
	.get_unmapped_area = vmw_get_unmapped_area,
};
static const struct drm_driver driver = {
	.driver_features =
	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct vmw_private *vmw;
	int ret;

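	/*
	 * Evict generic firmware framebuffer drivers (e.g. efifb or
	 * vesafb, presumably bound to the same PCI aperture) before
	 * taking over the device.
	 */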
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
	if (ret)
		return ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
				 struct vmw_private, drm);
	if (IS_ERR(vmw))
		return PTR_ERR(vmw);

	pci_set_drvdata(pdev, &vmw->drm);

	ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
	if (ret)
		return ret;

	ret = vmw_driver_load(vmw, ent->device);
	if (ret)
		return ret;

	ret = drm_dev_register(&vmw->drm, 0);
	if (ret) {
		vmw_driver_unload(&vmw->drm);
		return ret;
	}

	return 0;
}
static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}
static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");