// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/console.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mem_encrypt.h>

#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "ttm_object.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
/*
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		 struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
		 union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,	\
		 union drm_vmw_gb_surface_reference_ext_arg)
#define DRM_IOCTL_VMW_MSG					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG,		\
		 struct drm_vmw_msg_arg)
/*
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */
#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
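
/*
 * For example, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * expands to an initializer for slot
 * [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] of the
 * table below, so each driver-private ioctl lands at its
 * device-relative index within vmw_ioctls[].
 */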
/*
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_MSG,
		      vmw_msg_ioctl,
		      DRM_RENDER_ALLOW),
};
static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
	if (capabilities2 & SVGA_CAP2_DX3)
		DRM_INFO("  DX3.\n");
}
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  Guest memory regions.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_placement, false, true,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}
/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
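
/**
 * vmw_request_device - Perform device bringup for command submission.
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the FIFO, brings up the fence manager's FIFO state and,
 * when available, the command buffer manager, then performs the late
 * setup (otables, large command buffers) and creates the dummy query bo.
 */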
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}
/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to the fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT] values.
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	/* TTM currently doesn't fully support SEV encryption. */
	if (mem_encrypt_active())
		return -EINVAL;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}
/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}
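
/**
 * vmw_vram_manager_init - Initialize the TTM resource manager for VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Uses the transparent-hugepage-aware manager when available and a plain
 * range manager otherwise. The manager starts out unused; it is only
 * switched on once SVGA is enabled.
 */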
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ret = vmw_thp_init(dev_priv);
#else
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
#endif
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}

static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	vmw_thp_fini(dev_priv);
#else
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
#endif
}
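
/**
 * vmw_driver_load - Bring up the device at probe time.
 *
 * @dev: Pointer to the struct drm_device.
 * @chipset: Chip id from the PCI device table.
 *
 * Reads device capabilities and sizes, selects a DMA mapping mode, maps
 * MMIO, sets up the TTM memory managers and KMS, and finally requests the
 * device for command submission. On failure, the error path unwinds in
 * reverse order of initialization.
 */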
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}

	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities since DMA not available.\n");
		refuse_dma = true;
		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			DRM_INFO("Disabling 3D acceleration.\n");
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size;

		if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2)
			mem_size = vmw_read(dev_priv,
					    SVGA_REG_GBOBJECT_MEM_SIZE_KB);
		else
			mem_size =
				vmw_read(dev_priv,
					 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}
	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	dma_set_max_seg_size(dev->dev, U32_MAX);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err0;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	drm_vma_offset_manager_init(&dev_priv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
	ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
				 dev->dev,
				 dev->anon_inode->i_mapping,
				 &dev_priv->vma_manager,
				 dev_priv->map_mode == vmw_dma_alloc_coherent,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}
	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = vmw_vram_manager_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}

	/*
	 * "Guest Memory Regions" is an aperture-like feature with
	 * one slot per bo. There is an upper limit on the number of
	 * slots as well as on the bo size.
	 */
	dev_priv->has_gmr = true;
	/* TODO: This is most likely not correct */
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
		dev_priv->has_mob = true;

		if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4;
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);

	/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
	if (has_sm4_context(dev_priv) &&
	    (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41);

		if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
			dev_priv->sm_type = VMW_SM_4_1;

		if (has_sm4_1_context(dev_priv) &&
		    (dev_priv->capabilities2 & SVGA_CAP2_DX3)) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5);
			if (vmw_read(dev_priv, SVGA_REG_DEV_CAP))
				dev_priv->sm_type = VMW_SM_5;
		}
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
		 ? "yes." : "no.");
	if (dev_priv->sm_type == VMW_SM_5)
		DRM_INFO("SM5 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4_1)
		DRM_INFO("SM4_1 support available.\n");
	if (dev_priv->sm_type == VMW_SM_4)
		DRM_INFO("SM4 support available.\n");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;
out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
	vmw_vram_manager_fini(dev_priv);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
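
/**
 * vmw_driver_unload - Tear down the device at remove time.
 *
 * @dev: Pointer to the struct drm_device.
 *
 * Reverses vmw_driver_load(): takes down fbdev, KMS, the memory managers,
 * command submission and the fence manager, releases the PCI regions and
 * frees the device private struct.
 */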
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
	vmw_vram_manager_fini(dev_priv);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}
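
/**
 * vmw_postclose - Clean up per-file driver state when a file is closed.
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: The file being closed.
 */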
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}
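
/**
 * vmw_driver_open - Set up per-file driver state when a file is opened.
 *
 * @dev: Pointer to the struct drm_device.
 * @file_priv: The file being opened.
 *
 * Allocates a struct vmw_fpriv and its TTM object file, through which
 * this file's resource handles are looked up.
 */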
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
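
/*
 * vmw_generic_ioctl - Common ioctl entry point for the native and compat
 * paths. Performs extra permission and encoding checks on the
 * driver-private ioctls before handing off to @ioctl_func.
 */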
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	unsigned int flags;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			return ioctl_func(filp, cmd, arg);
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	return ioctl_func(filp, cmd, arg);

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
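
/*
 * vmw_master_set - Called when a file becomes the DRM master; sends a
 * hotplug event unless it is becoming master right at open time.
 */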
static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}
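
/*
 * vmw_master_drop - Called when the current DRM master goes away; clears
 * legacy cursor hotspots and, without fbdev, disables SVGA.
 */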
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);
}
/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	spin_lock(&dev_priv->svga_lock);
	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
	spin_unlock(&dev_priv->svga_lock);
}
/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	spin_lock(&dev_priv->svga_lock);
	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}
/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 *
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (ttm_resource_manager_used(man)) {
		ttm_resource_manager_set_used(man, false);
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}
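
/*
 * vmw_remove - PCI remove callback: unregister the DRM device and tear
 * down the driver state set up in vmw_probe().
 */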
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
	vmw_driver_unload(dev);
	drm_dev_put(dev);
	pci_disable_device(pdev);
}
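
/*
 * vmw_get_unmapped_area - Provide huge-page-friendly mmap placement by
 * delegating to drm_get_unmapped_area() with the driver's vma manager.
 */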
static unsigned long
vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
		      unsigned long len, unsigned long pgoff,
		      unsigned long flags)
{
	struct drm_file *file_priv = file->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
				     &dev_priv->vma_manager);
}
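
/*
 * vmwgfx_pm_notifier - PM notifier that takes the reservation sem on
 * hibernation prepare, so all buffer objects can be evicted while user
 * space is frozen, and releases it again after resume or restore.
 */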
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}
static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}
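
/*
 * vmw_pm_freeze - Hibernation freeze callback.
 *
 * Suspends KMS and fbdev, evicts all resources and takes down command
 * submission. Fails with -EBUSY if 3D resources are still active, in
 * which case the previous state is restored.
 */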
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	while (ttm_bo_swapout(&ctx) == 0);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);
	return 0;
}
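
/*
 * vmw_pm_restore - Hibernation thaw/restore callback.
 *
 * Re-identifies the SVGA device, brings command submission back up and
 * resumes KMS and fbdev, undoing vmw_pm_freeze().
 */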
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};
static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
	.get_unmapped_area = vmw_get_unmapped_area,
};
static const struct drm_driver driver = {
	.driver_features =
	DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};
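
/*
 * vmw_probe - PCI probe callback: enable the device, allocate the DRM
 * device, load the driver and register with the DRM core.
 */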
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_device *dev;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	dev = drm_dev_alloc(&driver, &pdev->dev);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto err_pci_disable_device;
	}

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = vmw_driver_load(dev, ent->driver_data);
	if (ret)
		goto err_drm_dev_put;

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret)
		goto err_vmw_driver_unload;

	return 0;

err_vmw_driver_unload:
	vmw_driver_unload(dev);
err_drm_dev_put:
	drm_dev_put(dev);
err_pci_disable_device:
	pci_disable_device(pdev);
	return ret;
}
static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");