GNU Linux-libre 4.4.285-gnu1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

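/*
 * Note: DRM_IOW/DRM_IOR/DRM_IOWR encode the transfer direction and the
 * argument size into the ioctl number, and driver-private ioctl numbers
 * count upwards from DRM_COMMAND_BASE (0x40 in the DRM core).
 */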
#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP                                \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
                 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
                 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
                 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
                 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK                          \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
                 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER                             \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,      \
                 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,        \
                 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE                         \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,  \
                 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,     \
                 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,             \
                 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT                   \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,    \
                struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

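/*
 * For illustration only: VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 * DRM_AUTH | DRM_RENDER_ALLOW) expands to a designated initializer at
 * index DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE, which
 * lets vmw_ioctls[] below be indexed directly with (nr - DRM_COMMAND_BASE),
 * as vmw_generic_ioctl() does.
 */
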
/**
 * Ioctl definitions.
 */

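/*
 * Flag semantics, as defined by the DRM core: DRM_AUTH requires an
 * authenticated client, DRM_MASTER requires the caller to be the current
 * master, DRM_CONTROL_ALLOW permits the ioctl on control nodes and
 * DRM_RENDER_ALLOW permits it on render nodes.
 */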
static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),

        /*
         * These ioctls allow direct access to the framebuffers and are
         * therefore marked as master only.
         */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER),
        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
                      vmw_shader_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
                      vmw_shader_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
                      vmw_gb_surface_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
                      vmw_gb_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_dmabuf_synccpu_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
                      vmw_extended_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);


static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
        if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
                DRM_INFO("  Command Buffers.\n");
        if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
                DRM_INFO("  Command Buffers 2.\n");
        if (capabilities & SVGA_CAP_GBOBJECTS)
                DRM_INFO("  Guest Backed Resources.\n");
        if (capabilities & SVGA_CAP_DX)
                DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        int ret;
        struct vmw_dma_buffer *vbo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;

        /*
         * Create the vbo as pinned, so that a tryreserve will
         * immediately succeed. This is because we're the only
         * user of the bo currently.
         */
        vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
        if (!vbo)
                return -ENOMEM;

        ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
                              &vmw_sys_ne_placement, false,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
        BUG_ON(ret != 0);
        vmw_bo_pin_reserved(vbo, true);

        ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        }
        vmw_bo_pin_reserved(vbo, false);
        ttm_bo_unreserve(&vbo->base);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
                vmw_dmabuf_unreference(&vbo);
        } else
                dev_priv->dummy_query_bo = vbo;

        return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
        int ret;

        if (dev_priv->has_mob) {
                ret = vmw_otables_setup(dev_priv);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to initialize "
                                  "guest Memory OBjects.\n");
                        return ret;
                }
        }

        if (dev_priv->cman) {
                ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
                                               256*4096, 2*4096);
                if (ret) {
                        struct vmw_cmdbuf_man *man = dev_priv->cman;

                        dev_priv->cman = NULL;
                        vmw_cmdbuf_man_destroy(man);
                }
        }

        return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
        if (IS_ERR(dev_priv->cman)) {
                dev_priv->cman = NULL;
                dev_priv->has_dx = false;
        }

        ret = vmw_request_device_late(dev_priv);
        if (ret)
                goto out_no_mob;

        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;

        return 0;

out_no_query_bo:
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
        if (dev_priv->has_mob) {
                (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);

        if (dev_priv->has_mob) {
                ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
        vmw_fence_fifo_down(dev_priv->fman);
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);

        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values against the fb_max_[width|height] fields and
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the values appear to be invalid,
 * they are set to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
                [vmw_dma_map_populate] = "Keeping DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
        const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_enabled) {
                dev_priv->map_mode = vmw_dma_map_populate;
                goto out_fixup;
        }
#endif

        if (!(vmw_force_iommu || vmw_force_coherent)) {
                dev_priv->map_mode = vmw_dma_phys;
                DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
                return 0;
        }

        dev_priv->map_mode = vmw_dma_map_populate;

        if (dma_ops->sync_single_for_cpu)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl() == 0)
                dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
        if (dev_priv->map_mode == vmw_dma_map_populate &&
            vmw_restrict_iommu)
                dev_priv->map_mode = vmw_dma_map_bind;

        if (vmw_force_coherent)
                dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
        /*
         * No coherent page pool
         */
        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                return -EINVAL;
#endif

#else /* CONFIG_X86 */
        dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

        return 0;
}
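
/*
 * To summarize the heuristic above: an active Intel IOMMU selects
 * vmw_dma_map_populate, no IOMMU and no force flags select vmw_dma_phys,
 * and vmw_dma_alloc_coherent is only chosen when coherent pages are
 * forced or the dma_map_ops suggest non-coherent DMA; restrict_iommu
 * then downgrades vmw_dma_map_populate to vmw_dma_map_bind.
 */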

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit builds we can only handle 32-bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int ret = 0;

        ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
        if (dev_priv->map_mode != vmw_dma_phys &&
            (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
                DRM_INFO("Restricting DMA addresses to 44 bits.\n");
                return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
        }

        return ret;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
        enum vmw_res_type i;
        bool refuse_dma = false;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        pci_set_master(dev->pdev);

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        rwlock_init(&dev_priv->resource_lock);
        ttm_lock_init(&dev_priv->reservation_sem);
        spin_lock_init(&dev_priv->hw_lock);
        spin_lock_init(&dev_priv->waiter_lock);
        spin_lock_init(&dev_priv->cap_lock);
        spin_lock_init(&dev_priv->svga_lock);

        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
        }

        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        dev_priv->fifo_queue_waiters = 0;

        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->assume_16bpp = !!vmw_assume_16bpp;

        dev_priv->enable_fb = enable_fbdev;

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
        ret = vmw_dma_select_mode(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
                refuse_dma = true;
        }

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512 MiB on surface memory,
                 * but all HWV8 hardware supports GMR2 anyway.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }
        dev_priv->max_mob_pages = 0;
        dev_priv->max_mob_size = 0;
        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint64_t mem_size =
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

                /*
                 * Workaround for low memory 2D VMs to compensate for the
                 * allocation taken by fbdev
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_3D))
                        mem_size *= 3;

                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
                                 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
                dev_priv->max_mob_size =
                        vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
                dev_priv->stdu_max_width =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
                dev_priv->stdu_max_height =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
                dev_priv->texture_max_width = vmw_read(dev_priv,
                                                       SVGA_REG_DEV_CAP);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
                dev_priv->texture_max_height = vmw_read(dev_priv,
                                                        SVGA_REG_DEV_CAP);
        } else {
                dev_priv->texture_max_width = 8192;
                dev_priv->texture_max_height = 8192;
                dev_priv->prim_bb_mem = dev_priv->vram_size;
        }

        vmw_print_capabilities(dev_priv->capabilities);

        ret = vmw_dma_masks(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("Maximum display memory size is %u kiB\n",
                 dev_priv->prim_bb_mem / 1024);
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;


        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;

        dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
                                       dev_priv->mmio_size, MEMREMAP_WB);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
                (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /*
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears that vesafb is loaded. "
                         "Ignore the above error, if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev, dev->pdev->irq);
                if (ret != 0) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL)) {
                ret = -ENOMEM;
                goto out_no_fman;
        }

        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_no_bdev;
        }

        /*
         * Enable VRAM, but initially don't use it until SVGA is enabled and
         * unhidden.
         */
        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_no_vram;
        }
        dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

        dev_priv->has_gmr = true;
        if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                                         VMW_PL_GMR) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                dev_priv->has_mob = true;
                if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
                                   VMW_PL_MOB) != 0) {
                        DRM_INFO("No MOB memory available. "
                                 "3D will be disabled.\n");
                        dev_priv->has_mob = false;
                }
        }

        if (dev_priv->has_mob) {
                spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
                dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                spin_unlock(&dev_priv->cap_lock);
        }


        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        ret = vmw_request_device(dev_priv);
        if (ret)
                goto out_no_fifo;

        DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

        if (dev_priv->enable_fb) {
                vmw_fifo_resource_inc(dev_priv);
                vmw_svga_enable(dev_priv);
                vmw_fb_init(dev_priv);
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_fifo:
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
out_no_irq:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        memunmap(dev_priv->mmio_virt);
out_err3:
        vmw_ttm_global_release(dev_priv);
out_err0:
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        kfree(dev_priv);
        return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        enum vmw_res_type i;

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.res_ht_initialized)
                drm_ht_remove(&dev_priv->ctx.res_ht);
        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
                vmw_fb_off(dev_priv);
                vmw_fb_close(dev_priv);
                vmw_fifo_resource_dec(dev_priv);
                vmw_svga_disable(dev_priv);
        }

        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);

        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

        vmw_release_device_early(dev_priv);
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        (void) ttm_bo_device_release(&dev_priv->bdev);
        vmw_release_device_late(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        memunmap(dev_priv->mmio_virt);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        vmw_ttm_global_release(dev_priv);

        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);

        return 0;
}

static void vmw_preclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_private *dev_priv = vmw_priv(dev);

        vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);

        if (vmw_fp->locked_master) {
                struct vmw_master *vmaster =
                        vmw_master(vmw_fp->locked_master);

                ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
                ttm_vt_unlock(&vmaster->lock);
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        INIT_LIST_HEAD(&vmw_fp->fence_events);
        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
                                           struct drm_file *file_priv,
                                           unsigned int flags)
{
        int ret;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster;

        if (file_priv->minor->type != DRM_MINOR_LEGACY ||
            !(flags & DRM_AUTH))
                return NULL;

        ret = mutex_lock_interruptible(&dev->master_mutex);
        if (unlikely(ret != 0))
                return ERR_PTR(-ERESTARTSYS);

        if (file_priv->is_master) {
                mutex_unlock(&dev->master_mutex);
                return NULL;
        }

        /*
         * Check if we were previously master, but have now been dropped. In
         * that case, allow at least render node functionality.
         */
        if (vmw_fp->locked_master) {
                mutex_unlock(&dev->master_mutex);

                if (flags & DRM_RENDER_ALLOW)
                        return NULL;

                DRM_ERROR("Dropped master trying to access ioctl that "
                          "requires authentication.\n");
                return ERR_PTR(-EACCES);
        }
        mutex_unlock(&dev->master_mutex);

        /*
         * Take the TTM lock. Possibly sleep waiting for the authenticating
         * master to become master again, or for a SIGTERM if the
         * authenticating master exits.
         */
        vmaster = vmw_master(file_priv->master);
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                vmaster = ERR_PTR(ret);

        return vmaster;
}
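
/*
 * Return contract of vmw_master_check(): NULL means no master
 * restrictions apply and no lock is held, an ERR_PTR means the ioctl must
 * be rejected, and any other value is a vmw_master whose ttm read lock is
 * held and must be released by the caller, as vmw_generic_ioctl() below
 * does.
 */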

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg,
                              long (*ioctl_func)(struct file *, unsigned int,
                                                 unsigned long))
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        struct vmw_master *vmaster;
        unsigned int flags;
        long ret;

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                const struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
                        ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
                        if (unlikely(ret != 0))
                                return ret;

                        if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
                                goto out_io_encoding;

                        return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
                                                        _IOC_SIZE(cmd));
                }

                if (unlikely(ioctl->cmd != cmd))
                        goto out_io_encoding;

                flags = ioctl->flags;
        } else if (!drm_ioctl_flags(nr, &flags))
                return -EINVAL;

        vmaster = vmw_master_check(dev, file_priv, flags);
        if (IS_ERR(vmaster)) {
                ret = PTR_ERR(vmaster);

                if (ret != -ERESTARTSYS)
                        DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
                                 nr, ret);
                return ret;
        }

        ret = ioctl_func(filp, cmd, arg);
        if (vmaster)
                ttm_read_unlock(&vmaster->lock);

        return ret;

out_io_encoding:
        DRM_ERROR("Invalid command format, ioctl %d\n",
                  nr - DRM_COMMAND_BASE);

        return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg)
{
        return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        return ret;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_kms_legacy_hotspot_clear(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

        if (!dev_priv->enable_fb)
                vmw_svga_disable(dev_priv);

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->svga_lock);
        if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
        }
        spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
        ttm_read_lock(&dev_priv->reservation_sem, false);
        __vmw_svga_enable(dev_priv);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->svga_lock);
        if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          SVGA_REG_ENABLE_HIDE |
                          SVGA_REG_ENABLE_ENABLE);
        }
        spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
        ttm_write_lock(&dev_priv->reservation_sem, false);
        spin_lock(&dev_priv->svga_lock);
        if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
                spin_unlock(&dev_priv->svga_lock);
                if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
                        DRM_ERROR("Failed evicting VRAM buffers.\n");
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          SVGA_REG_ENABLE_HIDE |
                          SVGA_REG_ENABLE_ENABLE);
        } else
                spin_unlock(&dev_priv->svga_lock);
        ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        pci_disable_device(pdev);
        drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);

        switch (val) {
        case PM_HIBERNATION_PREPARE:
                if (dev_priv->enable_fb)
                        vmw_fb_off(dev_priv);
                ttm_suspend_lock(&dev_priv->reservation_sem);

                /*
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv);
                vmw_resource_evict_all(dev_priv);
                vmw_release_device_early(dev_priv);
                ttm_bo_swapout_all(&dev_priv->bdev);
                vmw_fence_fifo_down(dev_priv->fman);
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
                vmw_fence_fifo_up(dev_priv->fman);
                ttm_suspend_unlock(&dev_priv->reservation_sem);
                if (dev_priv->enable_fb)
                        vmw_fb_on(dev_priv);
                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}
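
/*
 * Note that PM_HIBERNATION_PREPARE and PM_POST_HIBERNATION/PM_POST_RESTORE
 * above are strictly paired: the former quiesces command submission,
 * empties VRAM and GMR bindings and takes the suspend lock, while the
 * latter releases the lock and brings the fence FIFO and fbdev back up.
 */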

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->refuse_hibernation)
                return -EBUSY;

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_fifo_resource_dec(dev_priv);

        if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
                DRM_ERROR("Can't hibernate while 3D resources are active.\n");
                if (dev_priv->enable_fb)
                        vmw_fifo_resource_inc(dev_priv);
                WARN_ON(vmw_request_device_late(dev_priv));
                dev_priv->suspended = false;
                return -EBUSY;
        }

        if (dev_priv->enable_fb)
                __vmw_svga_disable(dev_priv);

        vmw_release_device_late(dev_priv);

        return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);

        if (dev_priv->enable_fb)
                vmw_fifo_resource_inc(dev_priv);

        ret = vmw_request_device(dev_priv);
        if (ret)
                return ret;

        if (dev_priv->enable_fb)
                __vmw_svga_enable(dev_priv);

        dev_priv->suspended = false;

        return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .freeze = vmw_pm_freeze,
        .thaw = vmw_pm_restore,
        .restore = vmw_pm_restore,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = vmw_unlocked_ioctl,
        .mmap = vmw_mmap,
        .poll = vmw_fops_poll,
        .read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
        .compat_ioctl = vmw_compat_ioctl,
#endif
        .llseek = noop_llseek,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .ioctls = vmw_ioctls,
        .num_ioctls = ARRAY_SIZE(vmw_ioctls),
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .preclose = vmw_preclose,
        .postclose = vmw_postclose,
        .set_busid = drm_pci_set_busid,

        .dumb_create = vmw_dumb_create,
        .dumb_map_offset = vmw_dumb_map_offset,
        .dumb_destroy = vmw_dumb_destroy,

        .prime_fd_to_handle = vmw_prime_fd_to_handle,
        .prime_handle_to_fd = vmw_prime_handle_to_fd,

        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;

#ifdef CONFIG_VGA_CONSOLE
        if (vgacon_text_force())
                return -EINVAL;
#endif

        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");