/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
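/*
 * This file implements the /dev/kfd character device: user space opens the
 * node once per process, drives the device through the AMDKFD_IOC_* ioctls
 * dispatched below, and maps doorbells, the event page and reserved memory
 * through kfd_mmap().
 */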

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

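/*
 * The same handler serves native and compat ioctls: the amdkfd ioctl
 * structs are defined with fixed-size, 64-bit-clean fields, so no compat
 * translation is needed (32-bit processes are rejected in kfd_open() in
 * any case).
 */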
static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = kfd_ioctl,
	.open = kfd_open,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;

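/*
 * Register the character device with a dynamically allocated major and
 * create the single /dev/kfd node (minor 0). The error paths unwind in
 * reverse order of construction.
 */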
int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
	err = PTR_ERR(kfd_class);
	if (IS_ERR(kfd_class))
		goto err_class_create;

	kfd_device = device_create(kfd_class, NULL,
					MKDEV(kfd_char_dev_major, 0),
					NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_destroy(kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_destroy(kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
}

struct device *kfd_chardev(void)
{
	return kfd_device;
}


static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = in_compat_syscall();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(filep);
	if (IS_ERR(process))
		return PTR_ERR(process);

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}

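/*
 * Each AMDKFD_IOC_* handler below receives a kernel copy of the user's
 * argument struct, prepared by the kfd_ioctl() dispatcher at the bottom of
 * this file; handlers read and modify it in place and the dispatcher
 * copies the result back to user space.
 *
 * Illustrative user space call sequence (a sketch, not part of this file):
 *
 *	struct kfd_ioctl_get_version_args args = {};
 *	int fd = open("/dev/kfd", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
 *		printf("KFD ioctl ABI %u.%u\n",
 *		       args.major_version, args.minor_version);
 */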
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}

static int set_queue_properties_from_user(struct queue_properties *q_properties,
				struct kfd_ioctl_create_queue_args *args)
{
	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->read_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->write_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->eop_buffer_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access eop buffer");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->ctx_save_restore_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access ctx save restore buffer");
		return -EFAULT;
	}

	q_properties->is_interop = false;
	q_properties->queue_percent = args->queue_percentage;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
	q_properties->ctl_stack_size = args->ctl_stack_size;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage: %d, %d\n",
			q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority: %d, %d\n",
			q_properties->priority, args->queue_priority);

	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
			q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size: 0x%llX, %u\n",
			q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers: %p, %p\n",
			q_properties->read_ptr,
			q_properties->write_ptr);

	pr_debug("Queue Format: %d\n", q_properties->format);

	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area: 0x%llX\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}

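/*
 * Create a user mode queue: validate the user supplied ring and pointer
 * addresses, bind the process to the target device, hand the properties
 * to the process queue manager, and return the new queue_id together with
 * a doorbell mmap offset.
 */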
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("Creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
	dev = kfd_device_by_id(args->gpu_id);
	if (!dev) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		return -EINVAL;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
			p->pasid,
			dev->id);

	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;

	/*
	 * Return gpu_id as doorbell offset for mmap usage. kfd_mmap()
	 * recognizes the KFD_MMAP_DOORBELL_MASK tag in vm_pgoff and routes
	 * the mapping to the doorbell page of the matching device.
	 */
	args->doorbell_offset = (KFD_MMAP_DOORBELL_MASK | args->gpu_id);
	args->doorbell_offset <<= PAGE_SHIFT;

	mutex_unlock(&p->mutex);

	pr_debug("Queue id %d was created successfully\n", args->queue_id);

	pr_debug("Ring buffer address == 0x%016llX\n",
			args->ring_base_address);

	pr_debug("Read ptr address    == 0x%016llX\n",
			args->read_pointer_address);

	pr_debug("Write ptr address   == 0x%016llX\n",
			args->write_pointer_address);

	return 0;

err_create_queue:
err_bind_process:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("Destroying queue id %d for pasid %d\n",
				args->queue_id,
				p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage;
	properties.priority = args->queue_priority;

	pr_debug("Updating queue id %d for pasid %d\n",
			args->queue_id, p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}

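/*
 * Select the default and alternate cache policy (coherent vs. noncoherent)
 * for the process on one device; the device queue manager applies the
 * choice, together with the optional alternate aperture range, to the
 * process's queues.
 */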
static int kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		   ? cache_policy_coherent : cache_policy_noncoherent;

	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size))
		err = -EINVAL;

out:
	mutex_unlock(&p->mutex);

	return err;
}

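/*
 * Attach the calling process to a device's debug manager. Only one
 * debugger may be registered per device at any time.
 */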
static int kfd_ioctl_dbg_register(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_register_args *args = data;
	struct kfd_dev *dev;
	struct kfd_dbgmgr *dbgmgr_ptr;
	struct kfd_process_device *pdd;
	bool create_ok;
	long status = 0;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(&p->mutex);
	mutex_lock(kfd_get_dbgmgr_mutex());

	/*
	 * make sure that we have pdd, if this is the first queue created for
	 * this process
	 */
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		status = PTR_ERR(pdd);
		goto out;
	}

	if (!dev->dbgmgr) {
		/* In case of a legal call, we have no dbgmgr yet */
		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
		if (create_ok) {
			status = kfd_dbgmgr_register(dbgmgr_ptr, p);
			if (status != 0)
				kfd_dbgmgr_destroy(dbgmgr_ptr);
			else
				dev->dbgmgr = dbgmgr_ptr;
		}
	} else {
		pr_debug("debugger already registered\n");
		status = -EINVAL;
	}

out:
	mutex_unlock(kfd_get_dbgmgr_mutex());
	mutex_unlock(&p->mutex);

	return status;
}

static int kfd_ioctl_dbg_unregister(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_unregister_args *args = data;
	struct kfd_dev *dev;
	long status;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
	if (!status) {
		kfd_dbgmgr_destroy(dev->dbgmgr);
		dev->dbgmgr = NULL;
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	return status;
}

/*
 * Parse and generate a variable size data structure for address watch.
 * The total buffer size and the number of watch points are limited in
 * order to prevent kernel abuse (this has no bearing on the much smaller
 * HW limitation, which is enforced by the dbgdev module).
 * Note also that the watch addresses themselves are not copied from user
 * space, since they are written into the HW as user mode values.
 */
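
/*
 * Sketch of the variable size payload implied by the parsing below (it
 * follows the fixed size ioctl args header in the user buffer):
 *
 *	uint32_t num_watch_points;
 *	enum HSA_DBG_WATCH_MODE watch_mode[num_watch_points];
 *	uint64_t watch_address[num_watch_points];
 *	uint64_t watch_mask[num_watch_points];
 *
 * The mask array is optional: it is treated as absent when its first
 * byte is zero.
 */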
static int kfd_ioctl_dbg_address_watch(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_address_watch_args *args = data;
	struct kfd_dev *dev;
	struct dbg_address_watch_info aw_info;
	unsigned char *args_buff;
	long status;
	void __user *cmd_from_user;
	uint64_t watch_mask_value = 0;
	unsigned int args_idx = 0;

	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_address_watch not supported on CZ\n");
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	/* Validate arguments */

	if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
		(args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
		(cmd_from_user == NULL))
		return -EINVAL;

	/* this is the actual buffer to work with */
	args_buff = memdup_user(cmd_from_user,
				args->buf_size_in_bytes - sizeof(*args));
	if (IS_ERR(args_buff))
		return PTR_ERR(args_buff);

	aw_info.process = p;

	aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(aw_info.num_watch_points);

	aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
	args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;

	/*
	 * set watch address base pointer to point on the array base
	 * within args_buff
	 */
	aw_info.watch_address = (uint64_t *) &args_buff[args_idx];

	/* skip over the addresses buffer */
	args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		status = -EINVAL;
		goto out;
	}

	watch_mask_value = (uint64_t) args_buff[args_idx];

	if (watch_mask_value > 0) {
		/*
		 * There is an array of masks.
		 * set watch mask base pointer to point on the array base
		 * within args_buff
		 */
		aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];

		/* skip over the masks buffer */
		args_idx += sizeof(aw_info.watch_mask) *
				aw_info.num_watch_points;
	} else {
		/* just the NULL mask, set to NULL and skip over it */
		aw_info.watch_mask = NULL;
		args_idx += sizeof(aw_info.watch_mask);
	}

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		status = -EINVAL;
		goto out;
	}

	/* Currently HSA Event is not supported for DBG */
	aw_info.watch_event = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);

	mutex_unlock(kfd_get_dbgmgr_mutex());

out:
	kfree(args_buff);

	return status;
}

/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_wave_control_args *args = data;
	struct kfd_dev *dev;
	struct dbg_wave_control_info wac_info;
	unsigned char *args_buff;
	uint32_t computed_buff_size;
	long status;
	void __user *cmd_from_user;
	unsigned int args_idx = 0;

	memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));

	/* we use compact form, independent of the packing attribute value */
	computed_buff_size = sizeof(*args) +
				sizeof(wac_info.mode) +
				sizeof(wac_info.operand) +
				sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
				sizeof(wac_info.dbgWave_msg.MemoryVA) +
				sizeof(wac_info.trapId);

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
		return -EINVAL;
	}

	/* input size must match the computed "compact" size */
	if (args->buf_size_in_bytes != computed_buff_size) {
		pr_debug("size mismatch, computed : actual %u : %u\n",
				args->buf_size_in_bytes, computed_buff_size);
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	if (cmd_from_user == NULL)
		return -EINVAL;

	/* copy the entire buffer from user */

	args_buff = memdup_user(cmd_from_user,
				args->buf_size_in_bytes - sizeof(*args));
	if (IS_ERR(args_buff))
		return PTR_ERR(args_buff);

	/* move ptr to the start of the "pay-load" area */
	wac_info.process = p;

	wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.operand);

	wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.mode);

	wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.trapId);

	wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
					*((uint32_t *)(&args_buff[args_idx]));
	wac_info.dbgWave_msg.MemoryVA = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
			wac_info.process, wac_info.operand,
			wac_info.mode, wac_info.trapId,
			wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);

	status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);

	pr_debug("Returned status of dbg manager is %ld\n", status);

	mutex_unlock(kfd_get_dbgmgr_mutex());

	kfree(args_buff);

	return status;
}

static int kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_dev *dev;
	struct timespec64 time;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	/* Reading GPU clock counter from KGD */
	args->gpu_clock_counter =
		dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);

	/* No access to rdtsc. Using raw monotonic time */
	getrawmonotonic64(&time);
	args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);

	get_monotonic_boottime64(&time);
	args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);

	/* Since the counter is in nano-seconds we use 1GHz frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}


static int kfd_ioctl_get_process_apertures(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	struct kfd_process_device *pdd;

	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);

	/*if the process-device list isn't empty*/
	if (kfd_has_process_device_data(p)) {
		/* Run over all pdd of the process */
		pdd = kfd_get_first_process_device_data(p);
		do {
			pAperture =
				&args->process_apertures[args->num_of_nodes];
			pAperture->gpu_id = pdd->dev->id;
			pAperture->lds_base = pdd->lds_base;
			pAperture->lds_limit = pdd->lds_limit;
			pAperture->gpuvm_base = pdd->gpuvm_base;
			pAperture->gpuvm_limit = pdd->gpuvm_limit;
			pAperture->scratch_base = pdd->scratch_base;
			pAperture->scratch_limit = pdd->scratch_limit;

			dev_dbg(kfd_device,
				"node id %u\n", args->num_of_nodes);
			dev_dbg(kfd_device,
				"gpu id %u\n", pdd->dev->id);
			dev_dbg(kfd_device,
				"lds_base %llX\n", pdd->lds_base);
			dev_dbg(kfd_device,
				"lds_limit %llX\n", pdd->lds_limit);
			dev_dbg(kfd_device,
				"gpuvm_base %llX\n", pdd->gpuvm_base);
			dev_dbg(kfd_device,
				"gpuvm_limit %llX\n", pdd->gpuvm_limit);
			dev_dbg(kfd_device,
				"scratch_base %llX\n", pdd->scratch_base);
			dev_dbg(kfd_device,
				"scratch_limit %llX\n", pdd->scratch_limit);

			args->num_of_nodes++;

			pdd = kfd_get_next_process_device_data(p, pdd);
		} while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
	}

	mutex_unlock(&p->mutex);

	return 0;
}

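/*
 * The event ioctls below are thin wrappers around the kfd event code:
 * kfd_event_create() returns an event id, trigger data and the offset of
 * the event page, which user space maps through kfd_mmap() further down.
 */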
static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_event_args *args = data;
	int err;

	err = kfd_event_create(filp, p, args->event_type,
				args->auto_reset != 0, args->node_id,
				&args->event_id, &args->event_trigger_data,
				&args->event_page_offset,
				&args->event_slot_index);

	return err;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_destroy_event_args *args = data;

	return kfd_event_destroy(p, args->event_id);
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_set_event_args *args = data;

	return kfd_set_event(p, args->event_id);
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_reset_event_args *args = data;

	return kfd_reset_event(p, args->event_id);
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_wait_events_args *args = data;
	int err;

	err = kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			args->timeout, &args->wait_result);

	return err;
}
static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
	struct kfd_process_device *pdd;
	struct kfd_dev *dev;
	long err;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_fail;
	}

	pdd->qpd.sh_hidden_private_base = args->va_addr;

	mutex_unlock(&p->mutex);

	if (sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0)
		dev->kfd2kgd->set_scratch_backing_va(
			dev->kgd, args->va_addr, pdd->qpd.vmid);

	return 0;

bind_process_to_device_fail:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_get_tile_config(struct file *filep,
		struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_tile_config_args *args = data;
	struct kfd_dev *dev;
	struct tile_config config;
	int err = 0;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	dev->kfd2kgd->get_tile_config(dev->kgd, &config);

	args->gb_addr_config = config.gb_addr_config;
	args->num_banks = config.num_banks;
	args->num_ranks = config.num_ranks;

	if (args->num_tile_configs > config.num_tile_configs)
		args->num_tile_configs = config.num_tile_configs;
	err = copy_to_user((void __user *)args->tile_config_ptr,
			config.tile_config_ptr,
			args->num_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_tile_configs = 0;
		return -EFAULT;
	}

	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
		args->num_macro_tile_configs =
				config.num_macro_tile_configs;
	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
			config.macro_tile_config_ptr,
			args->num_macro_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_macro_tile_configs = 0;
		return -EFAULT;
	}

	return 0;
}

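/*
 * AMDKFD_IOCTL_DEF() fills one slot of the ioctl table, indexed by the
 * command's _IOC_NR, so kfd_ioctl() can look up the handler directly from
 * the command number.
 */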
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
			    .cmd_drv = 0, .name = #ioctl}

/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
			kfd_ioctl_get_version, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
			kfd_ioctl_create_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
			kfd_ioctl_destroy_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
			kfd_ioctl_set_memory_policy, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
			kfd_ioctl_get_clock_counters, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
			kfd_ioctl_get_process_apertures, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
			kfd_ioctl_update_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
			kfd_ioctl_create_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
			kfd_ioctl_destroy_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
			kfd_ioctl_set_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
			kfd_ioctl_reset_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
			kfd_ioctl_wait_events, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
			kfd_ioctl_dbg_register, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
			kfd_ioctl_dbg_unregister, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
			kfd_ioctl_dbg_address_watch, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
			kfd_ioctl_dbg_wave_control, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
			kfd_ioctl_set_scratch_backing_va, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
			kfd_ioctl_get_tile_config, 0)
};

#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)
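
/*
 * Common ioctl dispatcher: look the handler up in amdkfd_ioctls[], copy
 * the user's argument struct into a kernel buffer (zero padding it when
 * the kernel's definition of the command is larger than the one user
 * space was built against), call the handler, and copy the result back
 * out for commands with an output direction.
 */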

static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kfd_process *process;
	amdkfd_ioctl_t *func;
	const struct amdkfd_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128];
	char *kdata = NULL;
	unsigned int usize, asize;
	int retcode = -EINVAL;

	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
		goto err_i1;

	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
		u32 amdkfd_size;

		ioctl = &amdkfd_ioctls[nr];

		amdkfd_size = _IOC_SIZE(ioctl->cmd);
		usize = asize = _IOC_SIZE(cmd);
		if (amdkfd_size > asize)
			asize = amdkfd_size;

		cmd = ioctl->cmd;
	} else
		goto err_i1;

	dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);

	process = kfd_get_process(current);
	if (IS_ERR(process)) {
		dev_dbg(kfd_device, "no process\n");
		goto err_i1;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(kfd_device, "no function\n");
		retcode = -EINVAL;
		goto err_i1;
	}

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kmalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto err_i1;
			}
		}
		if (asize > usize)
			memset(kdata + usize, 0, asize - usize);
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
			retcode = -EFAULT;
			goto err_i1;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(filep, process, kdata);

	if (cmd & IOC_OUT)
		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
			retcode = -EFAULT;

err_i1:
	if (!ioctl)
		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	if (retcode)
		dev_dbg(kfd_device, "ret = %d\n", retcode);

	return retcode;
}

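/*
 * All mappings of /dev/kfd are demultiplexed by a type tag encoded in the
 * upper bits of vm_pgoff (doorbells, the event page, reserved memory);
 * the tag is XORed out before the type specific handler runs.
 */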
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kfd_process *process;

	process = kfd_get_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) ==
			KFD_MMAP_DOORBELL_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_DOORBELL_MASK;
		return kfd_doorbell_mmap(process, vma);
	} else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) ==
			KFD_MMAP_EVENTS_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK;
		return kfd_event_mmap(process, vma);
	} else if ((vma->vm_pgoff & KFD_MMAP_RESERVED_MEM_MASK) ==
			KFD_MMAP_RESERVED_MEM_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_RESERVED_MEM_MASK;
		return kfd_reserved_mem_mmap(process, vma);
	}

	return -EFAULT;
}