/**************************************************************************
 *
 * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
                struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
                struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
                struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
                struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
                struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
                struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
                 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
                struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
                 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
                 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
                struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
                 struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
        [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
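
/*
 * Illustrative expansion (a sketch, not additional driver logic): an entry
 * such as VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH)
 * expands roughly to
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *		{DRM_IOCTL_VMW_GET_PARAM, DRM_AUTH, vmw_getparam_ioctl}
 *
 * so the table below is indexed by the ioctl number relative to
 * DRM_COMMAND_BASE, matching the lookup done in vmw_generic_ioctl().
 */
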
static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),

        /* these allow direct access to the framebuffers; mark as master only */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH),
        /*
         * The permissions of the below ioctl are overridden in
         * vmw_generic_ioctl(). We require either
         * DRM_MASTER or capable(CAP_SYS_ADMIN).
         */
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
                      vmw_shader_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
                      vmw_shader_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
                      vmw_gb_surface_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
                      vmw_gb_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_dmabuf_synccpu_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
                      vmw_extended_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
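
/*
 * Example usage (a sketch; the parameter names are the ones declared above):
 *
 *	modprobe vmwgfx enable_fbdev=1 force_coherent=1
 *
 * or, equivalently, on the kernel command line:
 *
 *	vmwgfx.enable_fbdev=1 vmwgfx.force_coherent=1
 */
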
static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
        if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
                DRM_INFO("  Command Buffers.\n");
        if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
                DRM_INFO("  Command Buffers 2.\n");
        if (capabilities & SVGA_CAP_GBOBJECTS)
                DRM_INFO("  Guest Backed Resources.\n");
        if (capabilities & SVGA_CAP_DX)
                DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        int ret;
        struct vmw_dma_buffer *vbo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;

        /*
         * Create the vbo as pinned, so that a tryreserve will
         * immediately succeed. This is because we're the only
         * user of the bo currently.
         */
        vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
        if (!vbo)
                return -ENOMEM;

        ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
                              &vmw_sys_ne_placement, false,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
        BUG_ON(ret != 0);
        vmw_bo_pin_reserved(vbo, true);

        ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        }
        vmw_bo_pin_reserved(vbo, false);
        ttm_bo_unreserve(&vbo->base);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
                vmw_dmabuf_unreference(&vbo);
        } else
                dev_priv->dummy_query_bo = vbo;

        return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
        int ret;

        if (dev_priv->has_mob) {
                ret = vmw_otables_setup(dev_priv);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to initialize "
                                  "guest Memory OBjects.\n");
                        return ret;
                }
        }

        if (dev_priv->cman) {
                ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
                                               256*4096, 2*4096);
                if (ret) {
                        struct vmw_cmdbuf_man *man = dev_priv->cman;

                        dev_priv->cman = NULL;
                        vmw_cmdbuf_man_destroy(man);
                }
        }

        return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
        if (IS_ERR(dev_priv->cman)) {
                dev_priv->cman = NULL;
                dev_priv->has_dx = false;
        }

        ret = vmw_request_device_late(dev_priv);
        if (ret)
                goto out_no_mob;

        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;

        return 0;

out_no_query_bo:
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
        if (dev_priv->has_mob) {
                (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);

        if (dev_priv->has_mob) {
                ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
        vmw_fence_fifo_down(dev_priv->fman);
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);

        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
                [vmw_dma_map_populate] = "Keeping DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
        const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_enabled) {
                dev_priv->map_mode = vmw_dma_map_populate;
                goto out_fixup;
        }
#endif

        if (!(vmw_force_iommu || vmw_force_coherent)) {
                dev_priv->map_mode = vmw_dma_phys;
                DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
                return 0;
        }

        dev_priv->map_mode = vmw_dma_map_populate;

        if (dma_ops->sync_single_for_cpu)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl() == 0)
                dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
        if (dev_priv->map_mode == vmw_dma_map_populate &&
            vmw_restrict_iommu)
                dev_priv->map_mode = vmw_dma_map_bind;

        if (vmw_force_coherent)
                dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
        /*
         * No coherent page pool
         */
        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                return -EINVAL;
#endif

#else /* CONFIG_X86 */
        dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

        return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev: Pointer to struct drm-device
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int ret;

        ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
        if (dev_priv->map_mode != vmw_dma_phys &&
            (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
                DRM_INFO("Restricting DMA addresses to 44 bits.\n");
                return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
        }

        return ret;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
        enum vmw_res_type i;
        bool refuse_dma = false;
        char host_log[100] = {0};

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        pci_set_master(dev->pdev);

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        mutex_init(&dev_priv->global_kms_state_mutex);
        rwlock_init(&dev_priv->resource_lock);
        ttm_lock_init(&dev_priv->reservation_sem);
        spin_lock_init(&dev_priv->hw_lock);
        spin_lock_init(&dev_priv->waiter_lock);
        spin_lock_init(&dev_priv->cap_lock);
        spin_lock_init(&dev_priv->svga_lock);

        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
        }

        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        dev_priv->fifo_queue_waiters = 0;

        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->assume_16bpp = !!vmw_assume_16bpp;

        dev_priv->enable_fb = enable_fbdev;

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
        ret = vmw_dma_select_mode(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
                refuse_dma = true;
        }

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }
        dev_priv->max_mob_pages = 0;
        dev_priv->max_mob_size = 0;
        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint64_t mem_size =
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

                /*
                 * Workaround for low memory 2D VMs to compensate for the
                 * allocation taken by fbdev
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_3D))
                        mem_size *= 3;

                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
                                 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
                dev_priv->max_mob_size =
                        vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
                dev_priv->stdu_max_width =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
                dev_priv->stdu_max_height =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
                dev_priv->texture_max_width = vmw_read(dev_priv,
                                                       SVGA_REG_DEV_CAP);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
                dev_priv->texture_max_height = vmw_read(dev_priv,
                                                        SVGA_REG_DEV_CAP);
        } else {
                dev_priv->texture_max_width = 8192;
                dev_priv->texture_max_height = 8192;
                dev_priv->prim_bb_mem = dev_priv->vram_size;
        }

        vmw_print_capabilities(dev_priv->capabilities);

        ret = vmw_dma_masks(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("Maximum display memory size is %u kiB\n",
                 dev_priv->prim_bb_mem / 1024);
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;

        dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
                                       dev_priv->mmio_size, MEMREMAP_WB);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
                (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /*
                 * Request at least the mmio PCI resource.
                 */
                DRM_INFO("It appears like vesafb is loaded. "
                         "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev, dev->pdev->irq);
                if (ret != 0) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL)) {
                ret = -ENOMEM;
                goto out_no_fman;
        }

        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_no_bdev;
        }

        /*
         * Enable VRAM, but initially don't use it until SVGA is enabled and
         * unhidden.
         */
        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_no_vram;
        }
        dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

        dev_priv->has_gmr = true;
        if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                                         VMW_PL_GMR) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                dev_priv->has_mob = true;
                if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
                                   VMW_PL_MOB) != 0) {
                        DRM_INFO("No MOB memory available. "
                                 "3D will be disabled.\n");
                        dev_priv->has_mob = false;
                }
        }

        if (dev_priv->has_mob) {
                spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
                dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                spin_unlock(&dev_priv->cap_lock);
        }

        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        ret = vmw_request_device(dev_priv);
        if (ret)
                goto out_no_fifo;

        DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

        snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
                 VMWGFX_REPO, VMWGFX_GIT_VERSION);
        vmw_host_log(host_log);

        memset(host_log, 0, sizeof(host_log));
        snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
                 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
                 VMWGFX_DRIVER_PATCHLEVEL);
        vmw_host_log(host_log);

        if (dev_priv->enable_fb) {
                vmw_fifo_resource_inc(dev_priv);
                vmw_svga_enable(dev_priv);
                vmw_fb_init(dev_priv);
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_fifo:
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
out_no_irq:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        memunmap(dev_priv->mmio_virt);
out_err3:
        vmw_ttm_global_release(dev_priv);
out_err0:
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        kfree(dev_priv);
        return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        enum vmw_res_type i;

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.res_ht_initialized)
                drm_ht_remove(&dev_priv->ctx.res_ht);
        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
                vmw_fb_off(dev_priv);
                vmw_fb_close(dev_priv);
                vmw_fifo_resource_dec(dev_priv);
                vmw_svga_disable(dev_priv);
        }

        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);

        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

        vmw_release_device_early(dev_priv);
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        (void) ttm_bo_device_release(&dev_priv->bdev);
        vmw_release_device_late(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        memunmap(dev_priv->mmio_virt);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        vmw_ttm_global_release(dev_priv);

        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);

        return 0;
}

static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);

        if (vmw_fp->locked_master) {
                struct vmw_master *vmaster =
                        vmw_master(vmw_fp->locked_master);

                ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
                ttm_vt_unlock(&vmaster->lock);
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
                                           struct drm_file *file_priv,
                                           unsigned int flags)
{
        int ret;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster;

        if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
                return NULL;

        ret = mutex_lock_interruptible(&dev->master_mutex);
        if (unlikely(ret != 0))
                return ERR_PTR(-ERESTARTSYS);

        if (drm_is_current_master(file_priv)) {
                mutex_unlock(&dev->master_mutex);
                return NULL;
        }

        /*
         * Check if we were previously master, but now dropped. In that
         * case, allow at least render node functionality.
         */
        if (vmw_fp->locked_master) {
                mutex_unlock(&dev->master_mutex);

                if (flags & DRM_RENDER_ALLOW)
                        return NULL;

                DRM_ERROR("Dropped master trying to access ioctl that "
                          "requires authentication.\n");
                return ERR_PTR(-EACCES);
        }
        mutex_unlock(&dev->master_mutex);

        /*
         * Take the TTM lock. Possibly sleep waiting for the authenticating
         * master to become master again, or for a SIGTERM if the
         * authenticating master exits.
         */
        vmaster = vmw_master(file_priv->master);
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                vmaster = ERR_PTR(ret);

        return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg,
                              long (*ioctl_func)(struct file *, unsigned int,
                                                 unsigned long))
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        struct vmw_master *vmaster;
        unsigned int flags;
        long ret;

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                const struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
                        ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
                        if (unlikely(ret != 0))
                                return ret;

                        if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
                                goto out_io_encoding;

                        return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
                                                        _IOC_SIZE(cmd));
                } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
                        if (!drm_is_current_master(file_priv) &&
                            !capable(CAP_SYS_ADMIN))
                                return -EACCES;
                }

                if (unlikely(ioctl->cmd != cmd))
                        goto out_io_encoding;

                flags = ioctl->flags;
        } else if (!drm_ioctl_flags(nr, &flags))
                return -EINVAL;

        vmaster = vmw_master_check(dev, file_priv, flags);
        if (IS_ERR(vmaster)) {
                ret = PTR_ERR(vmaster);

                if (ret != -ERESTARTSYS)
                        DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
                                 nr, ret);
                return ret;
        }

        ret = ioctl_func(filp, cmd, arg);
        if (vmaster)
                ttm_read_unlock(&vmaster->lock);

        return ret;

out_io_encoding:
        DRM_ERROR("Invalid command format, ioctl %d\n",
                  nr - DRM_COMMAND_BASE);

        return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg)
{
        return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        return ret;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        /*
         * Inform a new master that the layout may have changed while
         * it was gone.
         */
        if (!from_open)
                drm_sysfs_hotplug_event(dev);

        return 0;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_kms_legacy_hotspot_clear(dev_priv);
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

        if (!dev_priv->enable_fb)
                vmw_svga_disable(dev_priv);

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->svga_lock);
        if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
        }
        spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
        ttm_read_lock(&dev_priv->reservation_sem, false);
        __vmw_svga_enable(dev_priv);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->svga_lock);
        if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          SVGA_REG_ENABLE_HIDE |
                          SVGA_REG_ENABLE_ENABLE);
        }
        spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
        ttm_write_lock(&dev_priv->reservation_sem, false);
        spin_lock(&dev_priv->svga_lock);
        if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
                spin_unlock(&dev_priv->svga_lock);
                if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
                        DRM_ERROR("Failed evicting VRAM buffers.\n");
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          SVGA_REG_ENABLE_HIDE |
                          SVGA_REG_ENABLE_ENABLE);
        } else
                spin_unlock(&dev_priv->svga_lock);
        ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        pci_disable_device(pdev);
        drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);

        switch (val) {
        case PM_HIBERNATION_PREPARE:
                if (dev_priv->enable_fb)
                        vmw_fb_off(dev_priv);
                ttm_suspend_lock(&dev_priv->reservation_sem);

                /*
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents is moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv);
                vmw_resource_evict_all(dev_priv);
                vmw_release_device_early(dev_priv);
                ttm_bo_swapout_all(&dev_priv->bdev);
                vmw_fence_fifo_down(dev_priv->fman);
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
                vmw_fence_fifo_up(dev_priv->fman);
                ttm_suspend_unlock(&dev_priv->reservation_sem);
                if (dev_priv->enable_fb)
                        vmw_fb_on(dev_priv);
                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->refuse_hibernation)
                return -EBUSY;

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_fifo_resource_dec(dev_priv);

        if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
                DRM_ERROR("Can't hibernate while 3D resources are active.\n");
                if (dev_priv->enable_fb)
                        vmw_fifo_resource_inc(dev_priv);
                WARN_ON(vmw_request_device_late(dev_priv));
                dev_priv->suspended = false;
                return -EBUSY;
        }

        if (dev_priv->enable_fb)
                __vmw_svga_disable(dev_priv);

        vmw_release_device_late(dev_priv);

        return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);

        if (dev_priv->enable_fb)
                vmw_fifo_resource_inc(dev_priv);

        ret = vmw_request_device(dev_priv);
        if (ret)
                return ret;

        if (dev_priv->enable_fb)
                __vmw_svga_enable(dev_priv);

        dev_priv->suspended = false;

        return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .freeze = vmw_pm_freeze,
        .thaw = vmw_pm_restore,
        .restore = vmw_pm_restore,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = vmw_unlocked_ioctl,
        .mmap = vmw_mmap,
        .poll = vmw_fops_poll,
        .read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
        .compat_ioctl = vmw_compat_ioctl,
#endif
        .llseek = noop_llseek,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .ioctls = vmw_ioctls,
        .num_ioctls = ARRAY_SIZE(vmw_ioctls),
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,
        .set_busid = drm_pci_set_busid,

        .dumb_create = vmw_dumb_create,
        .dumb_map_offset = vmw_dumb_map_offset,
        .dumb_destroy = vmw_dumb_destroy,

        .prime_fd_to_handle = vmw_prime_fd_to_handle,
        .prime_handle_to_fd = vmw_prime_handle_to_fd,

        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;

        if (vgacon_text_force())
                return -EINVAL;

        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");