vfio_main.c 41.6 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
Alex Williamson's avatar
Alex Williamson committed
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
19 20 21
#ifdef CONFIG_HAVE_KVM
#include <linux/kvm_host.h>
#endif
Alex Williamson's avatar
Alex Williamson committed
22
#include <linux/list.h>
23
#include <linux/miscdevice.h>
Alex Williamson's avatar
Alex Williamson committed
24 25
#include <linux/module.h>
#include <linux/mutex.h>
26
#include <linux/pci.h>
27
#include <linux/rwsem.h>
Alex Williamson's avatar
Alex Williamson committed
28 29
#include <linux/sched.h>
#include <linux/slab.h>
30
#include <linux/stat.h>
Alex Williamson's avatar
Alex Williamson committed
31 32 33 34
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
35
#include <linux/sched/signal.h>
36
#include <linux/pm_runtime.h>
37 38
#include <linux/interval_tree.h>
#include <linux/iova_bitmap.h>
39
#include <linux/iommufd.h>
40
#include "vfio.h"
Alex Williamson's avatar
Alex Williamson committed
41 42 43 44 45 46

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
47 48
	struct class			*device_class;
	struct ida			device_ida;
Alex Williamson's avatar
Alex Williamson committed
49 50
} vfio;

51 52 53 54 55 56 57
/*
 * Opt-in parameter exposing devices without IOMMU backing; guarded by
 * CONFIG_VFIO_NOIOMMU.  See the parameter description for the caveats.
 */
#ifdef CONFIG_VFIO_NOIOMMU
bool vfio_noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   vfio_noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
#endif

58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130
static DEFINE_XARRAY(vfio_device_set_xa);

/*
 * Associate @device with the vfio_device_set keyed by @set_id.  Drivers that
 * pass the same set_id share one set; the pointer value itself is the xarray
 * key.  Returns 0 on success, -EINVAL for a NULL set_id, or a negative errno
 * on allocation/xarray failure.
 */
int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{
	unsigned long idx = (unsigned long)set_id;
	struct vfio_device_set *new_dev_set;
	struct vfio_device_set *dev_set;

	if (WARN_ON(!set_id))
		return -EINVAL;

	/*
	 * Atomically acquire a singleton object in the xarray for this set_id
	 */
	xa_lock(&vfio_device_set_xa);
	dev_set = xa_load(&vfio_device_set_xa, idx);
	if (dev_set)
		goto found_get_ref;
	xa_unlock(&vfio_device_set_xa);

	/* Not found: allocate outside the lock, then race to install it. */
	new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL);
	if (!new_dev_set)
		return -ENOMEM;
	mutex_init(&new_dev_set->lock);
	INIT_LIST_HEAD(&new_dev_set->device_list);
	new_dev_set->set_id = set_id;

	xa_lock(&vfio_device_set_xa);
	dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set,
			       GFP_KERNEL);
	if (!dev_set) {
		/* We won the race; our allocation is the set. */
		dev_set = new_dev_set;
		goto found_get_ref;
	}

	/* Lost the race (or xarray error): discard our copy. */
	kfree(new_dev_set);
	if (xa_is_err(dev_set)) {
		xa_unlock(&vfio_device_set_xa);
		return xa_err(dev_set);
	}

found_get_ref:
	/* device_count is protected by the xarray lock. */
	dev_set->device_count++;
	xa_unlock(&vfio_device_set_xa);
	mutex_lock(&dev_set->lock);
	device->dev_set = dev_set;
	list_add_tail(&device->dev_set_list, &dev_set->device_list);
	mutex_unlock(&dev_set->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_assign_device_set);

/* Detach @device from its set; the last member out frees the set itself. */
static void vfio_release_device_set(struct vfio_device *device)
{
	struct vfio_device_set *set = device->dev_set;

	if (!set)
		return;

	mutex_lock(&set->lock);
	list_del(&device->dev_set_list);
	mutex_unlock(&set->lock);

	/* device_count is protected by the xarray lock. */
	xa_lock(&vfio_device_set_xa);
	set->device_count--;
	if (!set->device_count) {
		__xa_erase(&vfio_device_set_xa, (unsigned long)set->set_id);
		mutex_destroy(&set->lock);
		kfree(set);
	}
	xa_unlock(&vfio_device_set_xa);
}

131 132 133 134 135 136 137 138 139 140 141 142 143
unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
{
	struct vfio_device *cur;
	unsigned int open_count = 0;

	lockdep_assert_held(&dev_set->lock);

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		open_count += cur->open_count;
	return open_count;
}
EXPORT_SYMBOL_GPL(vfio_device_set_open_count);

144 145 146 147 148 149 150 151 152 153 154 155 156 157 158
struct vfio_device *
vfio_find_device_in_devset(struct vfio_device_set *dev_set,
			   struct device *dev)
{
	struct vfio_device *cur;

	lockdep_assert_held(&dev_set->lock);

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		if (cur->dev == dev)
			return cur;
	return NULL;
}
EXPORT_SYMBOL_GPL(vfio_find_device_in_devset);

159
/*
Alex Williamson's avatar
Alex Williamson committed
160 161 162
 * Device objects - create, release, get, put, search
 */
/* Device reference always implies a group reference */
163
void vfio_device_put_registration(struct vfio_device *device)
Alex Williamson's avatar
Alex Williamson committed
164
{
165 166
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->comp);
Alex Williamson's avatar
Alex Williamson committed
167 168
}

169
bool vfio_device_try_get_registration(struct vfio_device *device)
Alex Williamson's avatar
Alex Williamson committed
170
{
171
	return refcount_inc_not_zero(&device->refcount);
Alex Williamson's avatar
Alex Williamson committed
172 173
}

174
/*
Alex Williamson's avatar
Alex Williamson committed
175 176
 * VFIO driver API
 */
177
/* Release helper called by vfio_put_device() */
178
static void vfio_device_release(struct device *dev)
179 180
{
	struct vfio_device *device =
181
			container_of(dev, struct vfio_device, device);
182

183
	vfio_release_device_set(device);
184
	ida_free(&vfio.device_ida, device->index);
185

186 187 188 189
	if (device->ops->release)
		device->ops->release(device);

	kvfree(device);
190 191
}

192 193 194
static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops);

195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233
/*
 * Allocate and initialize vfio_device so it can be registered to vfio
 * core.
 *
 * Drivers should use the wrapper vfio_alloc_device() for allocation.
 * @size is the size of the structure to be allocated, including any
 * private data used by the driver.
 *
 * Driver may provide an @init callback to cover device private data.
 *
 * Use vfio_put_device() to release the structure after success return.
 */
/*
 * Allocate @size bytes (at least sizeof(struct vfio_device), including any
 * driver private data) and initialize the embedded vfio_device for @dev with
 * @ops.  Returns the new device or an ERR_PTR on failure.
 */
struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
				       const struct vfio_device_ops *ops)
{
	struct vfio_device *device;
	int ret;

	if (WARN_ON(size < sizeof(struct vfio_device)))
		return ERR_PTR(-EINVAL);

	device = kvzalloc(size, GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	ret = vfio_init_device(device, dev, ops);
	if (ret) {
		kvfree(device);
		return ERR_PTR(ret);
	}
	return device;
}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);

/*
 * Initialize a vfio_device so it can be registered to vfio core.
 */
234 235
static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops)
236 237 238
{
	int ret;

239 240 241 242 243 244 245
	ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL);
	if (ret < 0) {
		dev_dbg(dev, "Error to alloc index\n");
		return ret;
	}

	device->index = ret;
246 247 248
	init_completion(&device->comp);
	device->dev = dev;
	device->ops = ops;
249 250 251 252 253 254 255

	if (ops->init) {
		ret = ops->init(device);
		if (ret)
			goto out_uninit;
	}

256 257 258 259
	device_initialize(&device->device);
	device->device.release = vfio_device_release;
	device->device.class = vfio.device_class;
	device->device.parent = device->dev;
260 261 262
	return 0;

out_uninit:
263
	vfio_release_device_set(device);
264
	ida_free(&vfio.device_ida, device->index);
265 266 267
	return ret;
}

268 269 270 271 272
static int __vfio_register_dev(struct vfio_device *device,
			       enum vfio_group_type type)
{
	int ret;

273 274 275
	if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) &&
		    (!device->ops->bind_iommufd ||
		     !device->ops->unbind_iommufd ||
276 277 278
		     !device->ops->attach_ioas)))
		return -EINVAL;

279 280 281 282 283 284 285
	/*
	 * If the driver doesn't specify a set then the device is added to a
	 * singleton set just for itself.
	 */
	if (!device->dev_set)
		vfio_assign_device_set(device, device);

286 287
	ret = dev_set_name(&device->device, "vfio%d", device->index);
	if (ret)
288 289 290 291 292
		return ret;

	ret = vfio_device_set_group(device, type);
	if (ret)
		return ret;
293 294 295 296 297

	ret = device_add(&device->device);
	if (ret)
		goto err_out;

298 299 300
	/* Refcounting can't start until the driver calls register */
	refcount_set(&device->refcount, 1);

301
	vfio_device_group_register(device);
302 303

	return 0;
304
err_out:
305
	vfio_device_remove_group(device);
306
	return ret;
307
}
308 309 310

int vfio_register_group_dev(struct vfio_device *device)
{
311
	return __vfio_register_dev(device, VFIO_IOMMU);
312
}
313 314
EXPORT_SYMBOL_GPL(vfio_register_group_dev);

315 316 317 318 319 320
/*
 * Register a virtual device without IOMMU backing.  The user of this
 * device must not be able to directly trigger unmediated DMA.
 */
int vfio_register_emulated_iommu_dev(struct vfio_device *device)
{
321
	return __vfio_register_dev(device, VFIO_EMULATED_IOMMU);
322 323 324
}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);

Alex Williamson's avatar
Alex Williamson committed
325 326 327
/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
328
void vfio_unregister_group_dev(struct vfio_device *device)
Alex Williamson's avatar
Alex Williamson committed
329
{
330
	unsigned int i = 0;
331
	bool interrupted = false;
332
	long rc;
Alex Williamson's avatar
Alex Williamson committed
333

334
	vfio_device_put_registration(device);
335 336
	rc = try_wait_for_completion(&device->comp);
	while (rc <= 0) {
337
		if (device->ops->request)
338
			device->ops->request(device, i++);
339

340
		if (interrupted) {
341 342
			rc = wait_for_completion_timeout(&device->comp,
							 HZ * 10);
343
		} else {
344 345 346
			rc = wait_for_completion_interruptible_timeout(
				&device->comp, HZ * 10);
			if (rc < 0) {
347
				interrupted = true;
348
				dev_warn(device->dev,
349 350 351 352 353 354
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}
355
	}
356

357
	vfio_device_group_unregister(device);
358

359 360 361
	/* Balances device_add in register path */
	device_del(&device->device);

362
	/* Balances vfio_device_set_group in register path */
363
	vfio_device_remove_group(device);
364 365 366
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);

367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415
#ifdef CONFIG_HAVE_KVM
/*
 * Try to associate @kvm with @device, taking a reference on it.  The
 * kvm_put_kvm/kvm_get_kvm_safe symbols are resolved dynamically via
 * symbol_get() (kvm may not be reachable at link time); holding the
 * symbol also prevents the providing module from unloading while we
 * hold the reference.  Caller must hold the dev_set lock.
 */
void _vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
{
	void (*pfn)(struct kvm *kvm);
	bool (*fn)(struct kvm *kvm);
	bool ret;

	lockdep_assert_held(&device->dev_set->lock);

	/* Resolve the put function first so a taken ref can always be dropped. */
	pfn = symbol_get(kvm_put_kvm);
	if (WARN_ON(!pfn))
		return;

	fn = symbol_get(kvm_get_kvm_safe);
	if (WARN_ON(!fn)) {
		symbol_put(kvm_put_kvm);
		return;
	}

	/* fn returns false when no reference could be taken. */
	ret = fn(kvm);
	symbol_put(kvm_get_kvm_safe);
	if (!ret) {
		symbol_put(kvm_put_kvm);
		return;
	}

	/* Keep the resolved put function for vfio_device_put_kvm(). */
	device->put_kvm = pfn;
	device->kvm = kvm;
}

/*
 * Drop the kvm reference taken by _vfio_device_get_kvm_safe() and clear
 * the association.  Caller must hold the dev_set lock.
 */
void vfio_device_put_kvm(struct vfio_device *device)
{
	lockdep_assert_held(&device->dev_set->lock);

	if (!device->kvm)
		return;

	if (WARN_ON(!device->put_kvm))
		goto clear;

	device->put_kvm(device->kvm);
	device->put_kvm = NULL;
	/* Balances the symbol_get(kvm_put_kvm) done when the ref was taken. */
	symbol_put(kvm_put_kvm);

clear:
	device->kvm = NULL;
}
#endif

416
/* true if the vfio_device has open_device() called but not close_device() */
417
static bool vfio_assert_device_open(struct vfio_device *device)
418
{
419 420 421
	return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}

422 423 424 425 426 427 428 429 430 431
/* Allocate the per-file-descriptor state bound to @device. */
struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device)
{
	struct vfio_device_file *df;

	df = kzalloc(sizeof(*df), GFP_KERNEL_ACCOUNT);
	if (!df)
		return ERR_PTR(-ENOMEM);

	df->device = device;
	spin_lock_init(&df->kvm_ref_lock);

	return df;
}

437
static int vfio_df_device_first_open(struct vfio_device_file *df)
438
{
439 440
	struct vfio_device *device = df->device;
	struct iommufd_ctx *iommufd = df->iommufd;
441 442 443 444 445 446 447
	int ret;

	lockdep_assert_held(&device->dev_set->lock);

	if (!try_module_get(device->dev->driver->owner))
		return -ENODEV;

448 449 450 451 452
	if (iommufd)
		ret = vfio_iommufd_bind(device, iommufd);
	else
		ret = vfio_device_group_use_iommu(device);
	if (ret)
453 454
		goto err_module_put;

455 456 457
	if (device->ops->open_device) {
		ret = device->ops->open_device(device);
		if (ret)
458
			goto err_unuse_iommu;
459 460 461
	}
	return 0;

462 463
err_unuse_iommu:
	if (iommufd)
464
		vfio_iommufd_unbind(device);
465 466
	else
		vfio_device_group_unuse_iommu(device);
467
err_module_put:
468 469 470 471
	module_put(device->dev->driver->owner);
	return ret;
}

472
static void vfio_df_device_last_close(struct vfio_device_file *df)
473
{
474 475 476
	struct vfio_device *device = df->device;
	struct iommufd_ctx *iommufd = df->iommufd;

477 478 479 480
	lockdep_assert_held(&device->dev_set->lock);

	if (device->ops->close_device)
		device->ops->close_device(device);
481
	if (iommufd)
482
		vfio_iommufd_unbind(device);
483 484
	else
		vfio_device_group_unuse_iommu(device);
485 486 487
	module_put(device->dev->driver->owner);
}

488
int vfio_df_open(struct vfio_device_file *df)
Alex Williamson's avatar
Alex Williamson committed
489
{
490
	struct vfio_device *device = df->device;
491
	int ret = 0;
492

493 494
	lockdep_assert_held(&device->dev_set->lock);

495
	device->open_count++;
496
	if (device->open_count == 1) {
497
		ret = vfio_df_device_first_open(df);
498
		if (ret)
499
			device->open_count--;
500 501
	}

502 503 504
	return ret;
}

505
void vfio_df_close(struct vfio_device_file *df)
506
{
507 508
	struct vfio_device *device = df->device;

509 510
	lockdep_assert_held(&device->dev_set->lock);

511 512
	vfio_assert_device_open(device);
	if (device->open_count == 1)
513
		vfio_df_device_last_close(df);
514 515 516
	device->open_count--;
}

517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549
/*
 * Wrapper around pm_runtime_resume_and_get().
 * Return error code on failure or 0 on success.
 */
static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
{
	struct device *dev = device->dev;
	int ret;

	/* Only devices whose driver has PM callbacks participate. */
	if (!dev->driver || !dev->driver->pm)
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (!ret)
		return 0;

	dev_info_ratelimited(dev,
		"vfio: runtime resume failed %d\n", ret);
	return -EIO;
}

/*
 * Wrapper around pm_runtime_put().
 */
static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
{
	struct device *dev = device->dev;

	if (dev->driver && dev->driver->pm)
		pm_runtime_put(dev);
}

550
/*
Alex Williamson's avatar
Alex Williamson committed
551 552 553 554
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
555 556
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;
Alex Williamson's avatar
Alex Williamson committed
557

558
	vfio_df_group_close(df);
Alex Williamson's avatar
Alex Williamson committed
559

560
	vfio_device_put_registration(device);
Alex Williamson's avatar
Alex Williamson committed
561

562 563
	kfree(df);

Alex Williamson's avatar
Alex Williamson committed
564 565 566
	return 0;
}

567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586
/*
 * vfio_mig_get_next_state - Compute the next step in the FSM
 * @cur_fsm - The current state the device is in
 * @new_fsm - The target state to reach
 * @next_fsm - Pointer to the next step to get to new_fsm
 *
 * Return 0 upon success, otherwise -errno
 * Upon success the next step in the state progression between cur_fsm and
 * new_fsm will be set in next_fsm.
 *
 * This breaks down requests for combination transitions into smaller steps and
 * returns the next step to get to new_fsm. The function may need to be called
 * multiple times before reaching new_fsm.
 *
 */
int vfio_mig_get_next_state(struct vfio_device *device,
			    enum vfio_device_mig_state cur_fsm,
			    enum vfio_device_mig_state new_fsm,
			    enum vfio_device_mig_state *next_fsm)
{
587
	enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_PRE_COPY_P2P + 1 };
588
	/*
589 590
	 * The coding in this table requires the driver to implement the
	 * following FSM arcs:
591 592 593 594 595
	 *         RESUMING -> STOP
	 *         STOP -> RESUMING
	 *         STOP -> STOP_COPY
	 *         STOP_COPY -> STOP
	 *
596 597 598 599 600 601
	 * If P2P is supported then the driver must also implement these FSM
	 * arcs:
	 *         RUNNING -> RUNNING_P2P
	 *         RUNNING_P2P -> RUNNING
	 *         RUNNING_P2P -> STOP
	 *         STOP -> RUNNING_P2P
602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618
	 *
	 * If precopy is supported then the driver must support these additional
	 * FSM arcs:
	 *         RUNNING -> PRE_COPY
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY -> STOP_COPY
	 * However, if precopy and P2P are supported together then the driver
	 * must support these additional arcs beyond the P2P arcs above:
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY -> PRE_COPY_P2P
	 *         PRE_COPY_P2P -> PRE_COPY
	 *         PRE_COPY_P2P -> RUNNING_P2P
	 *         PRE_COPY_P2P -> STOP_COPY
	 *         RUNNING -> PRE_COPY
	 *         RUNNING_P2P -> PRE_COPY_P2P
	 *
	 * Without P2P and precopy the driver must implement:
619 620 621 622 623 624
	 *         RUNNING -> STOP
	 *         STOP -> RUNNING
	 *
	 * The coding will step through multiple states for some combination
	 * transitions; if all optional features are supported, this means the
	 * following ones:
625 626 627 628 629 630 631
	 *         PRE_COPY -> PRE_COPY_P2P -> STOP_COPY
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP -> RESUMING
	 *         PRE_COPY_P2P -> RUNNING_P2P -> RUNNING
	 *         PRE_COPY_P2P -> RUNNING_P2P -> STOP
	 *         PRE_COPY_P2P -> RUNNING_P2P -> STOP -> RESUMING
632
	 *         RESUMING -> STOP -> RUNNING_P2P
633
	 *         RESUMING -> STOP -> RUNNING_P2P -> PRE_COPY_P2P
634
	 *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING
635
	 *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY
636
	 *         RESUMING -> STOP -> STOP_COPY
637
	 *         RUNNING -> RUNNING_P2P -> PRE_COPY_P2P
638 639 640
	 *         RUNNING -> RUNNING_P2P -> STOP
	 *         RUNNING -> RUNNING_P2P -> STOP -> RESUMING
	 *         RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY
641
	 *         RUNNING_P2P -> RUNNING -> PRE_COPY
642 643
	 *         RUNNING_P2P -> STOP -> RESUMING
	 *         RUNNING_P2P -> STOP -> STOP_COPY
644
	 *         STOP -> RUNNING_P2P -> PRE_COPY_P2P
645
	 *         STOP -> RUNNING_P2P -> RUNNING
646
	 *         STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY
647
	 *         STOP_COPY -> STOP -> RESUMING
648 649
	 *         STOP_COPY -> STOP -> RUNNING_P2P
	 *         STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING
650 651 652 653
	 *
	 *  The following transitions are blocked:
	 *         STOP_COPY -> PRE_COPY
	 *         STOP_COPY -> PRE_COPY_P2P
654 655 656 657
	 */
	static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = {
		[VFIO_DEVICE_STATE_STOP] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
658
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
659 660
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
661 662
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
663
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
664 665 666
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RUNNING] = {
667
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
668
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
669 670
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
671 672 673
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
674 675
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695
		[VFIO_DEVICE_STATE_PRE_COPY] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_PRE_COPY_P2P] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
696 697 698
		[VFIO_DEVICE_STATE_STOP_COPY] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
699 700
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR,
701 702
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
703
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
704 705 706 707 708
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RESUMING] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
709 710
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_STOP,
711 712
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
713 714 715 716 717 718
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RUNNING_P2P] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
719 720
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
721 722 723
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
724 725 726 727 728
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_ERROR] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR,
729 730
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR,
731 732
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR,
733
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR,
734 735 736 737
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
	};

738 739 740
	static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = {
		[VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY,
741 742 743 744 745
		[VFIO_DEVICE_STATE_PRE_COPY] =
			VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY,
		[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_MIGRATION_STOP_COPY |
						   VFIO_MIGRATION_P2P |
						   VFIO_MIGRATION_PRE_COPY,
746 747 748 749 750 751 752 753 754 755
		[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RUNNING_P2P] =
			VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P,
		[VFIO_DEVICE_STATE_ERROR] = ~0U,
	};

	if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
		    (state_flags_table[cur_fsm] & device->migration_flags) !=
			state_flags_table[cur_fsm]))
756 757
		return -EINVAL;

758 759 760
	if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
	   (state_flags_table[new_fsm] & device->migration_flags) !=
			state_flags_table[new_fsm])
761 762
		return -EINVAL;

763 764 765 766 767
	/*
	 * Arcs touching optional and unsupported states are skipped over. The
	 * driver will instead see an arc from the original state to the next
	 * logical state, as per the above comment.
	 */
768
	*next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm];
769 770 771 772
	while ((state_flags_table[*next_fsm] & device->migration_flags) !=
			state_flags_table[*next_fsm])
		*next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm];

773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817
	return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_mig_get_next_state);

/*
 * Convert the drivers's struct file into a FD number and return it to userspace
 */
static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg,
				   struct vfio_device_feature_mig_state *mig)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int ret;

	if (fd < 0) {
		ret = fd;
		goto out_fput;
	}

	mig->data_fd = fd;
	if (copy_to_user(arg, mig, sizeof(*mig))) {
		ret = -EFAULT;
		goto out_put_unused;
	}

	/* Only publish the file once userspace has seen the fd number. */
	fd_install(fd, filp);
	return 0;

out_put_unused:
	put_unused_fd(fd);
out_fput:
	fput(filp);
	return ret;
}

/*
 * GET reports the current migration state; SET asks the driver to move to
 * mig.device_state and, when the driver returns a data file, hands its fd
 * back to userspace.  On non-fd paths data_fd is reported as -1.
 */
static int
vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
					   u32 flags, void __user *arg,
					   size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_mig_state, data_fd);
	struct vfio_device_feature_mig_state mig;
	struct file *filp = NULL;
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET |
				 VFIO_DEVICE_FEATURE_GET,
				 sizeof(mig));
	if (ret != 1)
		return ret;

	if (copy_from_user(&mig, arg, minsz))
		return -EFAULT;

	if (flags & VFIO_DEVICE_FEATURE_GET) {
		enum vfio_device_mig_state curr_state;

		ret = device->mig_ops->migration_get_state(device,
							   &curr_state);
		if (ret)
			return ret;
		mig.device_state = curr_state;
		goto out_copy;
	}

	/* Handle the VFIO_DEVICE_FEATURE_SET */
	filp = device->mig_ops->migration_set_state(device, mig.device_state);
	if (IS_ERR(filp) || !filp)
		goto out_copy;

	return vfio_ioct_mig_return_fd(filp, arg, &mig);
out_copy:
	mig.data_fd = -1;
	if (copy_to_user(arg, &mig, sizeof(mig)))
		return -EFAULT;
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	return 0;
}

857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884
/* Report the driver's estimated STOP_COPY data size to userspace. */
static int
vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device,
					      u32 flags, void __user *arg,
					      size_t argsz)
{
	struct vfio_device_feature_mig_data_size data_size = {};
	unsigned long stop_copy_length;
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	/* This feature is GET-only. */
	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				 sizeof(data_size));
	if (ret != 1)
		return ret;

	ret = device->mig_ops->migration_get_data_size(device,
						       &stop_copy_length);
	if (ret)
		return ret;

	data_size.stop_copy_length = stop_copy_length;
	return copy_to_user(arg, &data_size, sizeof(data_size)) ? -EFAULT : 0;
}

885 886 887 888 889
/* Report the device's supported migration feature flags to userspace. */
static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
					       u32 flags, void __user *arg,
					       size_t argsz)
{
	struct vfio_device_feature_migration mig = {
		.flags = device->migration_flags,
	};
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				 sizeof(mig));
	if (ret != 1)
		return ret;
	if (copy_to_user(arg, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066
/* Ranges should fit into a single kernel page */
#define LOG_MAX_RANGES \
	(PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))

/*
 * Start DMA dirty-page tracking: validate the user-supplied IOVA ranges
 * (alignment, overflow, no overlaps), build them into an interval tree and
 * hand it to the driver's log_start op.
 */
static int
vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
					u32 flags, void __user *arg,
					size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_dma_logging_control,
			    ranges);
	struct vfio_device_feature_dma_logging_range __user *ranges;
	struct vfio_device_feature_dma_logging_control control;
	struct vfio_device_feature_dma_logging_range range;
	struct rb_root_cached root = RB_ROOT_CACHED;
	struct interval_tree_node *nodes;
	u64 iova_end;
	u32 nnodes;
	int i, ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET,
				 sizeof(control));
	if (ret != 1)
		return ret;

	if (copy_from_user(&control, arg, minsz))
		return -EFAULT;

	nnodes = control.num_ranges;
	if (!nnodes)
		return -EINVAL;

	/* Bound the allocation below to a single page of nodes. */
	if (nnodes > LOG_MAX_RANGES)
		return -E2BIG;

	ranges = u64_to_user_ptr(control.ranges);
	nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
			      GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nnodes; i++) {
		if (copy_from_user(&range, &ranges[i], sizeof(range))) {
			ret = -EFAULT;
			goto end;
		}
		/* Both ends must be aligned to the requested tracking page size. */
		if (!IS_ALIGNED(range.iova, control.page_size) ||
		    !IS_ALIGNED(range.length, control.page_size)) {
			ret = -EINVAL;
			goto end;
		}

		if (check_add_overflow(range.iova, range.length, &iova_end) ||
		    iova_end > ULONG_MAX) {
			ret = -EOVERFLOW;
			goto end;
		}

		nodes[i].start = range.iova;
		nodes[i].last = range.iova + range.length - 1;
		if (interval_tree_iter_first(&root, nodes[i].start,
					     nodes[i].last)) {
			/* Range overlapping */
			ret = -EINVAL;
			goto end;
		}
		interval_tree_insert(nodes + i, &root);
	}

	/* The driver may round page_size; the updated value is copied back. */
	ret = device->log_ops->log_start(device, &root, nnodes,
					 &control.page_size);
	if (ret)
		goto end;

	if (copy_to_user(arg, &control, sizeof(control))) {
		ret = -EFAULT;
		device->log_ops->log_stop(device);
	}

end:
	kfree(nodes);
	return ret;
}

/*
 * VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP handler: tear down dirty-page
 * tracking previously armed via DMA_LOGGING_START.
 */
static int
vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
				       u32 flags, void __user *arg,
				       size_t argsz)
{
	int rc;

	if (!device->log_ops)
		return -ENOTTY;

	/* No payload: feature struct carries only the header for STOP */
	rc = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET, 0);
	if (rc != 1)
		return rc;

	return device->log_ops->log_stop(device);
}

/* iova_bitmap_for_each() callback: forward one window to the driver op. */
static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
					  unsigned long iova, size_t length,
					  void *opaque)
{
	struct vfio_device *vdev = opaque;

	return vdev->log_ops->log_read_and_clear(vdev, iova, length, iter);
}

/*
 * VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT handler: read (and clear) the
 * device's dirty-page state over one IOVA range into a userspace bitmap,
 * one bit per report.page_size granule.
 */
static int
vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
					 u32 flags, void __user *arg,
					 size_t argsz)
{
	/* Mandatory leading portion of the user structure */
	size_t minsz =
		offsetofend(struct vfio_device_feature_dma_logging_report,
			    bitmap);
	struct vfio_device_feature_dma_logging_report report;
	struct iova_bitmap *iter;
	u64 iova_end;
	int ret;

	/* Reporting requires driver-implemented dirty tracking */
	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_GET,
				 sizeof(report));
	if (ret != 1)
		return ret;

	if (copy_from_user(&report, arg, minsz))
		return -EFAULT;

	/* Granule must be a power of two and at least 4K */
	if (report.page_size < SZ_4K || !is_power_of_2(report.page_size))
		return -EINVAL;

	/* Reject wrap-around and IOVAs beyond unsigned long space */
	if (check_add_overflow(report.iova, report.length, &iova_end) ||
	    iova_end > ULONG_MAX)
		return -EOVERFLOW;

	iter = iova_bitmap_alloc(report.iova, report.length,
				 report.page_size,
				 u64_to_user_ptr(report.bitmap));
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	/* Walk the range in chunks, letting the driver fill each window */
	ret = iova_bitmap_for_each(iter, device,
				   vfio_device_log_read_and_clear);

	iova_bitmap_free(iter);
	return ret;
}

1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091
static int vfio_ioctl_device_feature(struct vfio_device *device,
				     struct vfio_device_feature __user *arg)
{
	size_t minsz = offsetofend(struct vfio_device_feature, flags);
	struct vfio_device_feature feature;

	if (copy_from_user(&feature, arg, minsz))
		return -EFAULT;

	if (feature.argsz < minsz)
		return -EINVAL;

	/* Check unknown flags */
	if (feature.flags &
	    ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET |
	      VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE))
		return -EINVAL;

	/* GET & SET are mutually exclusive except with PROBE */
	if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
	    (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
	    (feature.flags & VFIO_DEVICE_FEATURE_GET))
		return -EINVAL;

	switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
1092 1093 1094 1095 1096 1097 1098 1099
	case VFIO_DEVICE_FEATURE_MIGRATION:
		return vfio_ioctl_device_feature_migration(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE:
		return vfio_ioctl_device_feature_mig_device_state(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_START:
		return vfio_ioctl_device_feature_logging_start(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP:
		return vfio_ioctl_device_feature_logging_stop(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT:
		return vfio_ioctl_device_feature_logging_report(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
1112 1113 1114 1115
	case VFIO_DEVICE_FEATURE_MIG_DATA_SIZE:
		return vfio_ioctl_device_feature_migration_data_size(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
1116 1117 1118 1119 1120 1121 1122 1123 1124
	default:
		if (unlikely(!device->ops->device_feature))
			return -EINVAL;
		return device->ops->device_feature(device, feature.flags,
						   arg->data,
						   feature.argsz - minsz);
	}
}

Alex Williamson's avatar
Alex Williamson committed
1125 1126 1127
static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
1128 1129
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;
1130 1131 1132 1133 1134
	int ret;

	ret = vfio_device_pm_runtime_get(device);
	if (ret)
		return ret;
Alex Williamson's avatar
Alex Williamson committed
1135

1136 1137
	switch (cmd) {
	case VFIO_DEVICE_FEATURE:
1138 1139 1140
		ret = vfio_ioctl_device_feature(device, (void __user *)arg);
		break;

1141 1142
	default:
		if (unlikely(!device->ops->ioctl))
1143 1144 1145 1146
			ret = -EINVAL;
		else
			ret = device->ops->ioctl(device, cmd, arg);
		break;
1147
	}
1148 1149 1150

	vfio_device_pm_runtime_put(device);
	return ret;
Alex Williamson's avatar
Alex Williamson committed
1151 1152 1153 1154 1155
}

/* Device file read: forward to the driver's read() op if it has one. */
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device, buf, count, ppos);
}

/* Device file write: forward to the driver's write() op if it has one. */
static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
1180 1181
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;
Alex Williamson's avatar
Alex Williamson committed
1182 1183 1184 1185

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

1186
	return device->ops->mmap(device, vma);
Alex Williamson's avatar
Alex Williamson committed
1187 1188
}

1189
const struct file_operations vfio_device_fops = {
Alex Williamson's avatar
Alex Williamson committed
1190 1191 1192 1193 1194
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
1195
	.compat_ioctl	= compat_ptr_ioctl,
Alex Williamson's avatar
Alex Williamson committed
1196 1197 1198
	.mmap		= vfio_device_fops_mmap,
};

1199 1200 1201 1202 1203 1204 1205 1206 1207
static struct vfio_device *vfio_device_from_file(struct file *file)
{
	struct vfio_device_file *df = file->private_data;

	if (file->f_op != &vfio_device_fops)
		return NULL;
	return df->device;
}

1208 1209 1210 1211 1212 1213
/**
 * vfio_file_is_valid - True if the file is valid vfio file
 * @file: VFIO group file or VFIO device file
 */
bool vfio_file_is_valid(struct file *file)
{
1214 1215
	return vfio_group_from_file(file) ||
	       vfio_device_from_file(file);
1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229
}
EXPORT_SYMBOL_GPL(vfio_file_is_valid);

/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 *        is always CPU cache coherent
 * @file: VFIO group file or VFIO device file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
 * bit in DMA transactions. A return of false indicates that the user has
 * rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{
1230
	struct vfio_device *device;
1231 1232 1233 1234 1235 1236
	struct vfio_group *group;

	group = vfio_group_from_file(file);
	if (group)
		return vfio_group_enforced_coherent(group);

1237 1238 1239 1240 1241
	device = vfio_device_from_file(file);
	if (device)
		return device_iommu_capable(device->dev,
					    IOMMU_CAP_ENFORCE_CACHE_COHERENCY);

1242 1243 1244 1245
	return true;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);

1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259
static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_device_file *df = file->private_data;

	/*
	 * The kvm is first recorded in the vfio_device_file, and will
	 * be propagated to vfio_device::kvm when the file is bound to
	 * iommufd successfully in the vfio device cdev path.
	 */
	spin_lock(&df->kvm_ref_lock);
	df->kvm = kvm;
	spin_unlock(&df->kvm_ref_lock);
}

/**
 * vfio_file_set_kvm - Link a kvm with VFIO drivers
 * @file: VFIO group file or VFIO device file
 * @kvm: KVM to link
 *
 * When a VFIO device is first opened the KVM will be available in
 * device->kvm if one was associated with the file.
 */
void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_group *group;

	group = vfio_group_from_file(file);
	if (group)
		vfio_group_set_kvm(group, kvm);

	if (vfio_device_from_file(file))
		vfio_device_file_set_kvm(file, kvm);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);

1281
/*
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
1302
		caps->buf = NULL;
1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
1317
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

/*
 * Rebase all 'next' offsets in the capability chain by @offset, turning
 * buffer-relative offsets into offsets within the final user buffer.
 * Note each link is followed via its pre-shift value (tmp->next - offset).
 */
void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);

1337 1338
int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
1339 1340 1341
{
	struct vfio_info_cap_header *header;

1342
	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
1343 1344 1345
	if (IS_ERR(header))
		return PTR_ERR(header);

1346
	memcpy(header + 1, cap + 1, size - sizeof(*header));
1347 1348 1349 1350

	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);
1351

1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399
int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);

1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411
/*
 * Pin contiguous user pages and return their associated host pages for local
 * domain only.
 * @device [in]  : device
 * @iova [in]    : starting IOVA of user pages to be pinned.
 * @npage [in]   : count of pages to be pinned.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @pages[out]   : array of host pages
 * Return error or number of pages pinned.
 *
 * A driver may only call this function if the vfio_device was created
1412
 * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages().
1413 1414 1415 1416 1417 1418 1419
 */
int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
		   int npage, int prot, struct page **pages)
{
	/* group->container cannot change while a vfio device is open */
	if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
		return -EINVAL;
1420 1421 1422
	if (vfio_device_has_container(device))
		return vfio_device_container_pin_pages(device, iova,
						       npage, prot, pages);
1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457
	if (device->iommufd_access) {
		int ret;

		if (iova > ULONG_MAX)
			return -EINVAL;
		/*
		 * VFIO ignores the sub page offset, npages is from the start of
		 * a PAGE_SIZE chunk of IOVA. The caller is expected to recover
		 * the sub page offset by doing:
		 *     pages[0] + (iova % PAGE_SIZE)
		 */
		ret = iommufd_access_pin_pages(
			device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE),
			npage * PAGE_SIZE, pages,
			(prot & IOMMU_WRITE) ? IOMMUFD_ACCESS_RW_WRITE : 0);
		if (ret)
			return ret;
		return npage;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(vfio_pin_pages);

/*
 * Unpin contiguous host pages for local domain only.
 * @device [in]  : device
 * @iova [in]    : starting address of user pages to be unpinned.
 * @npage [in]   : count of pages to be unpinned.  This count should not
 *                 be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 */
void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
	if (WARN_ON(!vfio_assert_device_open(device)))
		return;

	if (vfio_device_has_container(device)) {
		vfio_device_container_unpin_pages(device, iova, npage);
		return;
	}
	if (device->iommufd_access) {
		if (WARN_ON(iova > ULONG_MAX))
			return;
		/* Mirror the ALIGN_DOWN done at pin time */
		iommufd_access_unpin_pages(device->iommufd_access,
					   ALIGN_DOWN(iova, PAGE_SIZE),
					   npage * PAGE_SIZE);
		return;
	}
}
EXPORT_SYMBOL(vfio_unpin_pages);

/*
 * This interface allows the CPUs to perform some sort of virtual DMA on
 * behalf of the device.
 *
 * CPUs read/write from/into a range of IOVAs pointing to user space memory
 * into/from a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * @device [in]		: VFIO device
 * @iova [in]		: base IOVA of a user space buffer
 * @data [in]		: pointer to kernel buffer
 * @len [in]		: kernel buffer length
 * @write		: indicate read or write
 * Return error code on failure or 0 on success.
 */
int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
		size_t len, bool write)
{
	if (!data || len <= 0 || !vfio_assert_device_open(device))
		return -EINVAL;

	if (vfio_device_has_container(device))
		return vfio_device_container_dma_rw(device, iova,
						    data, len, write);

	if (device->iommufd_access) {
		unsigned int flags = 0;

		if (iova > ULONG_MAX)
			return -EINVAL;

		/* VFIO historically tries to auto-detect a kthread */
		if (!current->mm)
			flags |= IOMMUFD_ACCESS_RW_KTHREAD;
		if (write)
			flags |= IOMMUFD_ACCESS_RW_WRITE;
		return iommufd_access_rw(device->iommufd_access, iova, data,
					 len, flags);
	}
	/* Neither a container nor an iommufd access is attached */
	return -EINVAL;
}
EXPORT_SYMBOL(vfio_dma_rw);

/*
 * Module/class support
 */
/*
 * Module init: set up the device IDA, group support, virqfd support and
 * the /sys/class/vfio-dev class, unwinding in reverse order on failure.
 */
static int __init vfio_init(void)
{
	int ret;

	ida_init(&vfio.device_ida);

	ret = vfio_group_init();
	if (ret)
		return ret;

	ret = vfio_virqfd_init();
	if (ret)
		goto err_virqfd;

	/* /sys/class/vfio-dev/vfioX */
	vfio.device_class = class_create("vfio-dev");
	if (IS_ERR(vfio.device_class)) {
		ret = PTR_ERR(vfio.device_class);
		goto err_dev_class;
	}

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
	return 0;

err_dev_class:
	vfio_virqfd_exit();
err_virqfd:
	vfio_group_cleanup();
	return ret;
}

/* Module exit: release everything vfio_init() created, in reverse order. */
static void __exit vfio_cleanup(void)
{
	ida_destroy(&vfio.device_ida);
	class_destroy(vfio.device_class);
	vfio.device_class = NULL;
	vfio_virqfd_exit();
	vfio_group_cleanup();
	xa_destroy(&vfio_device_set_xa);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");