// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "ttm_object.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,	\
		union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,		\
		union drm_vmw_gb_surface_reference_ext_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark them as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_bo_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
		      vmw_gb_surface_define_ext_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
		      vmw_gb_surface_reference_ext_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);


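/*
 * Helpers that log the SVGA device capability bits reported by the host at
 * driver load time.
 */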
static void vmw_print_capabilities2(uint32_t capabilities2)
{
	DRM_INFO("Capabilities2:\n");
	if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
		DRM_INFO("  Grow oTable.\n");
	if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
		DRM_INFO("  IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
	if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
		DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_buffer_object *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
			  &vmw_sys_ne_placement, false,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_bo_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

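/*
 * vmw_request_device - Bring up command submission: initialize the FIFO,
 * enable fencing, create the command buffer manager, perform the late
 * setup (otables, large command buffer pool) and create the dummy query bo.
 */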
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Sets the initial_[width|height] fields on the given
 * vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine what actions need to be taken by the
 * driver to make system pages visible to the device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Caching DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
	else if (vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;
	else
		dev_priv->map_mode = vmw_dma_map_populate;

	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
		return -EINVAL;

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}

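/*
 * vmw_driver_load - Main per-device initialization. Negotiates the SVGA
 * device version, reads capabilities and memory sizes, sets up locks, TTM
 * memory managers, the fence manager, interrupts, KMS and, if enabled, the
 * fbdev emulation.
 */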
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->resource_lock);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
		dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
	}


	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);
	if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
		vmw_print_capabilities2(dev_priv->capabilities2);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err0;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
						&vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	if (dev_priv->has_dx) {
		/*
		 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
		 * support
		 */
		if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP,
					SVGA3D_DEVCAP_SM41);
			dev_priv->has_sm4_1 = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
		}
	}

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
	DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
		 ? "yes." : "no.");
	DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

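/*
 * vmw_driver_unload - Tear down everything set up by vmw_driver_load, in
 * roughly reverse order.
 */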
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

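/*
 * vmw_postclose - Per-file teardown. Releases a still-locked master, the
 * TTM object file and the file private data.
 */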
static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

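/*
 * vmw_master_check - Check whether the calling file may issue an ioctl with
 * the given flags. Returns NULL if no master lock needs to be held, a
 * pointer to the master whose ttm lock was taken (to be released by the
 * caller), or an ERR_PTR on error.
 */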
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (drm_is_current_master(file_priv)) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

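/*
 * vmw_generic_ioctl - Common ioctl entry point. Applies extra permission and
 * encoding checks to the driver-private ioctls, takes the master lock when
 * required and then forwards the call to the given core DRM ioctl handler.
 */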
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(!vmaster))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

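/*
 * vmw_master_set - Called when a client becomes master. Hands control over
 * from the fbdev master, if active, and unlocks the new master's ttm lock.
 */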
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

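/*
 * vmw_master_drop - Called when a client drops master. Locks out further
 * rendering by the old master and switches back to the fbdev master.
 */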
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 *
	 */
	vmw_kms_lost_device(dev_priv->dev);
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

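/* PCI remove callback: disable the PCI device and unregister the DRM device. */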
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

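/*
 * vmwgfx_pm_notifier - PM notifier that takes the reservation semaphore in
 * suspend mode before hibernation and releases it again afterwards.
 */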
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		/*
		 * Take the reservation sem in write mode, which will make sure
		 * there are no other processes holding a buffer object
		 * reservation, meaning we should be able to evict all buffer
		 * objects if needed.
		 * Once user-space processes have been frozen, we can release
		 * the lock again.
		 */
		ttm_suspend_lock(&dev_priv->reservation_sem);
		dev_priv->suspend_locked = true;
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		if (READ_ONCE(dev_priv->suspend_locked)) {
			dev_priv->suspend_locked = false;
			ttm_suspend_unlock(&dev_priv->reservation_sem);
		}
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

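/*
 * vmw_pm_freeze - Hibernation freeze handler. Suspends modesetting, evicts
 * resources and takes the device down, refusing to hibernate while 3D
 * resources are still active.
 */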
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	/*
	 * Unlock for vmw_kms_suspend.
	 * No user-space processes should be running now.
	 */
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	ret = vmw_kms_suspend(dev_priv->dev);
	if (ret) {
		ttm_suspend_lock(&dev_priv->reservation_sem);
		DRM_ERROR("Failed to freeze modesetting.\n");
		return ret;
	}
	if (dev_priv->enable_fb)
		vmw_fb_off(dev_priv);

	ttm_suspend_lock(&dev_priv->reservation_sem);
	vmw_execbuf_release_pinned_bo(dev_priv);
	vmw_resource_evict_all(dev_priv);
	vmw_release_device_early(dev_priv);
	ttm_bo_swapout_all(&dev_priv->bdev);
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);
	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspend_locked = false;
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->suspend_state)
			vmw_kms_resume(dev);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		return -EBUSY;
	}

	vmw_fence_fifo_down(dev_priv->fman);
	__vmw_svga_disable(dev_priv);
	
	vmw_release_device_late(dev_priv);
	return 0;
}

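/*
 * vmw_pm_restore - Hibernation thaw/restore handler. Re-negotiates the SVGA
 * device version, brings command submission back up and resumes fbdev and
 * modesetting.
 */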
static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->suspend_locked = false;
	ttm_suspend_unlock(&dev_priv->reservation_sem);
	if (dev_priv->suspend_state)
		vmw_kms_resume(dev_priv->dev);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

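/* PCI probe callback: register a DRM device for a matching VMware SVGA II adapter. */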
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");