// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include <linux/kthread.h>

#include "gem/i915_gem_context.h"

#include "intel_gt.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "selftest_engine_heartbeat.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"

#include "selftests/mock_drm.h"

#include "gem/selftests/mock_context.h"
#include "gem/selftests/igt_gem_utils.h"

#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */

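/*
 * The hang fixture: a batch that writes the request's seqno into a slot in
 * the HWS page and then jumps back to its own start, spinning on the GPU
 * until the batch is rewritten or the engine is reset.
 */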
struct hang {
	struct intel_gt *gt;
	struct drm_i915_gem_object *hws;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	u32 *seqno;
	u32 *batch;
};

static int hang_init(struct hang *h, struct intel_gt *gt)
{
	void *vaddr;
	int err;

	memset(h, 0, sizeof(*h));
	h->gt = gt;

	h->ctx = kernel_context(gt->i915);
	if (IS_ERR(h->ctx))
		return PTR_ERR(h->ctx);

	GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));

	h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->hws)) {
		err = PTR_ERR(h->hws);
		goto err_ctx;
	}

	h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(h->obj)) {
		err = PTR_ERR(h->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	vaddr = i915_gem_object_pin_map(h->obj,
					i915_coherent_map_type(gt->i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	h->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(h->hws);
err_obj:
	i915_gem_object_put(h->obj);
err_hws:
	i915_gem_object_put(h->hws);
err_ctx:
	kernel_context_close(h->ctx);
	return err;
}

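/*
 * Each fence context gets its own seqno dword in the HWS page, wrapping
 * within the page.
 */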
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}

static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct intel_gt *gt = h->gt;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
	struct drm_i915_gem_object *obj;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	void *vaddr;
	u32 *batch;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		i915_vm_put(vm);
		return ERR_CAST(obj);
	}

	vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
	if (IS_ERR(vaddr)) {
		i915_gem_object_put(obj);
		i915_vm_put(vm);
		return ERR_CAST(vaddr);
	}

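	/*
	 * Give each request its own batch pages: an earlier spinner may still
	 * be executing out of the old object, and the batch contents encode
	 * this request's seqno and addresses.
	 */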
	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	h->obj = obj;
	h->batch = vaddr;

	vma = i915_vma_instance(h->obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_vm_put(vm);
		return ERR_CAST(vma);
	}

	hws = i915_vma_instance(h->hws, vm, NULL);
	if (IS_ERR(hws)) {
		i915_vm_put(vm);
		return ERR_CAST(hws);
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err) {
		i915_vm_put(vm);
		return ERR_PTR(err);
	}

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = igt_request_alloc(h->ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

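	/*
	 * Emit the spinner: store the request's seqno into its HWS slot, then
	 * jump back to the start of the batch so it runs until the pages are
	 * rewritten or the engine is reset. The opcodes vary with the gen.
	 */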
	batch = h->batch;
	if (INTEL_GEN(gt->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_NOOP;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_NOOP;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*batch++ = lower_32_bits(vma->node.start);
		*batch++ = upper_32_bits(vma->node.start);
	} else if (INTEL_GEN(gt->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_NOOP;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_NOOP;
		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
		*batch++ = lower_32_bits(vma->node.start);
	} else if (INTEL_GEN(gt->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_NOOP;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_NOOP;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = rq->fence.seqno;
		*batch++ = MI_NOOP;

		memset(batch, 0, 1024);
		batch += 1024 / sizeof(*batch);

		*batch++ = MI_NOOP;
		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
		*batch++ = lower_32_bits(vma->node.start);
	}
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
	intel_gt_chipset_flush(engine->gt);

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
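	/* Older gens presumably need a privileged batch for the MI_STORE_DWORD above */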
	if (INTEL_GEN(gt->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	i915_vm_put(vm);
	return err ? ERR_PTR(err) : rq;
}

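/* Read back the seqno the spinner wrote for this request's context */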
static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
	return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}

static void hang_fini(struct hang *h)
{
	*h->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(h->gt);

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	i915_gem_object_unpin_map(h->hws);
	i915_gem_object_put(h->hws);

	kernel_context_close(h->ctx);

	igt_flush_test(h->gt->i915);
}

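/* Busy-wait briefly, then sleep-wait up to a second, for the spinner to start */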
static bool wait_until_running(struct hang *h, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
					    rq->fence.seqno),
			  1000));
}

static int igt_hang_sanitycheck(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_request *rq;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Basic check that we can execute our hanging batch */

	err = hang_init(&h, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_wedge_me w;
		long timeout;

		if (!intel_engine_can_store_dword(engine))
			continue;

		rq = hang_create_request(&h, engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			pr_err("Failed to create request for %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}

		i915_request_get(rq);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_add(rq);

		timeout = 0;
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			timeout = i915_request_wait(rq, 0,
						    MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(gt))
			timeout = -EIO;

		i915_request_put(rq);

		if (timeout < 0) {
			err = timeout;
			pr_err("Wait for request failed on %s, err=%d\n",
			       engine->name, err);
			goto fini;
		}
	}

fini:
	hang_fini(&h);
	return err;
}

static bool wait_for_idle(struct intel_engine_cs *engine)
{
	return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}

static int igt_reset_nop(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	unsigned int reset_count, count;
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	int err = 0;

	/* Check that we can reset during non-user portions of requests */

	reset_count = i915_reset_count(global);
	count = 0;
	do {
		for_each_engine(engine, gt, id) {
			struct intel_context *ce;
			int i;

			ce = intel_context_create(engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				break;
			}

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					break;
				}

				i915_request_add(rq);
			}

			intel_context_put(ce);
		}

		igt_global_reset_lock(gt);
		intel_gt_reset(gt, ALL_ENGINES, NULL);
		igt_global_reset_unlock(gt);

		if (intel_gt_is_wedged(gt)) {
			err = -EIO;
			break;
		}

		if (i915_reset_count(global) != reset_count + ++count) {
			pr_err("Full GPU reset not recorded!\n");
			err = -EINVAL;
			break;
		}

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	} while (time_before(jiffies, end_time));
	pr_info("%s: %d resets\n", __func__, count);

	if (igt_flush_test(gt->i915))
		err = -EIO;
	return err;
}

static int igt_reset_nop_engine(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Check that we can engine-reset during non-user portions */

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		unsigned int reset_count, reset_engine_count, count;
		struct intel_context *ce;
		IGT_TIMEOUT(end_time);
		int err;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);
		count = 0;

		st_engine_heartbeat_disable(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			int i;

			if (!wait_for_idle(engine)) {
				pr_err("%s failed to idle before reset\n",
				       engine->name);
				err = -EIO;
				break;
			}

			for (i = 0; i < 16; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);
					intel_engine_dump(engine, &p,
							  "%s(%s): failed to submit request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to submit request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					intel_gt_set_wedged(gt);

					err = PTR_ERR(rq);
					break;
				}

				i915_request_add(rq);
			}
			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("intel_engine_reset(%s) failed, err:%d\n",
				       engine->name, err);
				break;
			}

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			if (i915_reset_engine_count(global, engine) !=
			    reset_engine_count + ++count) {
				pr_err("%s engine reset not recorded!\n",
				       engine->name);
				err = -EINVAL;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);

		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);

		intel_context_put(ce);
		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}

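/*
 * Use the selftest reset_timeout fault injection to make subsequent engine
 * resets report a timeout, so that the failure paths can be exercised.
 */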
static void force_reset_timeout(struct intel_engine_cs *engine)
{
	engine->reset_timeout.probability = 999;
	atomic_set(&engine->reset_timeout.times, -1);
}

static void cancel_reset_timeout(struct intel_engine_cs *engine)
{
	memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
}

static int igt_reset_fail_engine(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Check that we can recover from engine-reset failures */

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		unsigned int count;
		struct intel_context *ce;
		IGT_TIMEOUT(end_time);
		int err;

		ce = intel_context_create(engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		st_engine_heartbeat_disable(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);

		force_reset_timeout(engine);
		err = intel_engine_reset(engine, NULL);
		cancel_reset_timeout(engine);
		if (err == 0) /* timeouts only generated on gen8+ */
			goto skip;

		count = 0;
		do {
			struct i915_request *last = NULL;
			int i;

			if (!wait_for_idle(engine)) {
				pr_err("%s failed to idle before reset\n",
				       engine->name);
				err = -EIO;
				break;
			}

			for (i = 0; i < count % 15; i++) {
				struct i915_request *rq;

				rq = intel_context_create_request(ce);
				if (IS_ERR(rq)) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);
					intel_engine_dump(engine, &p,
							  "%s(%s): failed to submit request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to submit request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					intel_gt_set_wedged(gt);
					if (last)
						i915_request_put(last);

					err = PTR_ERR(rq);
					goto out;
				}

				if (last)
					i915_request_put(last);
				last = i915_request_get(rq);
				i915_request_add(rq);
			}

			if (count & 1) {
				err = intel_engine_reset(engine, NULL);
				if (err) {
					GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
						      engine->name, err);
					GEM_TRACE_DUMP();
					i915_request_put(last);
					break;
				}
			} else {
				force_reset_timeout(engine);
				err = intel_engine_reset(engine, NULL);
				cancel_reset_timeout(engine);
				if (err != -ETIMEDOUT) {
					pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
					       engine->name, err);
					i915_request_put(last);
					break;
				}
			}

			err = 0;
			if (last) {
				if (i915_request_wait(last, 0, HZ / 2) < 0) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);

					intel_engine_dump(engine, &p,
							  "%s(%s): failed to complete request\n",
							  __func__,
							  engine->name);

					GEM_TRACE("%s(%s): failed to complete request\n",
						  __func__,
						  engine->name);
					GEM_TRACE_DUMP();

					err = -EIO;
				}
				i915_request_put(last);
			}
			count++;
		} while (err == 0 && time_before(jiffies, end_time));
out:
		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
skip:
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);
		intel_context_put(ce);

		if (igt_flush_test(gt->i915))
			err = -EIO;
		if (err)
			return err;
	}

	return 0;
}

static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err = 0;

	/* Check that we can issue an engine reset on an idle engine (no-op) */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (active) {
		err = hang_init(&h, gt);
		if (err)
			return err;
	}

	for_each_engine(engine, gt, id) {
		unsigned int reset_count, reset_engine_count;
		unsigned long count;
		IGT_TIMEOUT(end_time);

		if (active && !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("%s failed to idle before reset\n",
			       engine->name);
			err = -EIO;
			break;
		}

		reset_count = i915_reset_count(global);
		reset_engine_count = i915_reset_engine_count(global, engine);

		st_engine_heartbeat_disable(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		count = 0;
		do {
			if (active) {
				struct i915_request *rq;

				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					break;
				}

				i915_request_get(rq);
				i915_request_add(rq);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					break;
				}

				i915_request_put(rq);
			}

			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("intel_engine_reset(%s) failed, err:%d\n",
				       engine->name, err);
				break;
			}

			if (i915_reset_count(global) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			if (i915_reset_engine_count(global, engine) !=
			    ++reset_engine_count) {
				pr_err("%s engine reset not recorded!\n",
				       engine->name);
				err = -EINVAL;
				break;
			}

			count++;
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);
		pr_info("%s: Completed %lu %s resets\n",
			engine->name, count, active ? "active" : "idle");

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	if (active)
		hang_fini(&h);

	return err;
}

static int igt_reset_idle_engine(void *arg)
{
	return __igt_reset_engine(arg, false);
}

static int igt_reset_active_engine(void *arg)
{
	return __igt_reset_engine(arg, true);
}

struct active_engine {
	struct task_struct *task;
	struct intel_engine_cs *engine;
	unsigned long resets;
	unsigned int flags;
};

#define TEST_ACTIVE	BIT(0)
#define TEST_OTHERS	BIT(1)
#define TEST_SELF	BIT(2)
#define TEST_PRIORITY	BIT(3)

static int active_request_put(struct i915_request *rq)
{
	int err = 0;

	if (!rq)
		return 0;

	if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
			  rq->engine->name,
			  rq->fence.context,
			  rq->fence.seqno);
		GEM_TRACE_DUMP();

		intel_gt_set_wedged(rq->engine->gt);
		err = -EIO;
	}

	i915_request_put(rq);

	return err;
}

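/*
 * kthread body: keep the target engine busy with a rolling window of
 * requests, optionally at randomised priorities, until asked to stop.
 */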
static int active_engine(void *data)
{
	I915_RND_STATE(prng);
	struct active_engine *arg = data;
	struct intel_engine_cs *engine = arg->engine;
	struct i915_request *rq[8] = {};
	struct intel_context *ce[ARRAY_SIZE(rq)];
	unsigned long count;
	int err = 0;

	for (count = 0; count < ARRAY_SIZE(ce); count++) {
		ce[count] = intel_context_create(engine);
		if (IS_ERR(ce[count])) {
			err = PTR_ERR(ce[count]);
			while (count--)
				intel_context_put(ce[count]);
			return err;
		}
	}

	count = 0;
	while (!kthread_should_stop()) {
		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
		struct i915_request *old = rq[idx];
		struct i915_request *new;

		new = intel_context_create_request(ce[idx]);
		if (IS_ERR(new)) {
			err = PTR_ERR(new);
			break;
		}

		rq[idx] = i915_request_get(new);
		i915_request_add(new);

		if (engine->schedule && arg->flags & TEST_PRIORITY) {
			struct i915_sched_attr attr = {
				.priority =
					i915_prandom_u32_max_state(512, &prng),
			};
			engine->schedule(rq[idx], &attr);
		}

		err = active_request_put(old);
		if (err)
			break;

		cond_resched();
	}

	for (count = 0; count < ARRAY_SIZE(rq); count++) {
		int err__ = active_request_put(rq[count]);

		/* Keep the first error */
		if (!err)
			err = err__;

		intel_context_put(ce[count]);
	}

	return err;
}

static int __igt_reset_engines(struct intel_gt *gt,
			       const char *test_name,
			       unsigned int flags)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine, *other;
	enum intel_engine_id id, tmp;
	struct hang h;
	int err = 0;

	/* Check that issuing a reset on one engine does not interfere
	 * with any other engine.
	 */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (flags & TEST_ACTIVE) {
		err = hang_init(&h, gt);
		if (err)
			return err;

		if (flags & TEST_PRIORITY)
			h.ctx->sched.priority = 1024;
	}

	for_each_engine(engine, gt, id) {
		struct active_engine threads[I915_NUM_ENGINES] = {};
		unsigned long device = i915_reset_count(global);
		unsigned long count = 0, reported;
		IGT_TIMEOUT(end_time);

		if (flags & TEST_ACTIVE &&
		    !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
			       engine->name, test_name);
			err = -EIO;
			break;
		}

		memset(threads, 0, sizeof(threads));
		for_each_engine(other, gt, tmp) {
			struct task_struct *tsk;

			threads[tmp].resets =
				i915_reset_engine_count(global, other);

			if (other == engine && !(flags & TEST_SELF))
				continue;

			if (other != engine && !(flags & TEST_OTHERS))
				continue;

			threads[tmp].engine = other;
			threads[tmp].flags = flags;

			tsk = kthread_run(active_engine, &threads[tmp],
					  "igt/%s", other->name);
			if (IS_ERR(tsk)) {
				err = PTR_ERR(tsk);
				goto unwind;
			}

			threads[tmp].task = tsk;
			get_task_struct(tsk);
		}

		yield(); /* start all threads before we begin */

		st_engine_heartbeat_disable(engine);
		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		do {
			struct i915_request *rq = NULL;

			if (flags & TEST_ACTIVE) {
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					break;
				}

				i915_request_get(rq);
				i915_request_add(rq);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

					pr_err("%s: Failed to start request %llx, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					break;
				}
			}

			err = intel_engine_reset(engine, NULL);
			if (err) {
				pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
				       engine->name, test_name, err);
				break;
			}

			count++;

			if (rq) {
				if (rq->fence.error != -EIO) {
					pr_err("i915_reset_engine(%s:%s):"
					       " failed to reset request %llx:%lld\n",
					       engine->name, test_name,
					       rq->fence.context,
					       rq->fence.seqno);
					i915_request_put(rq);

					GEM_TRACE_DUMP();
					intel_gt_set_wedged(gt);
					err = -EIO;
					break;
				}

				if (i915_request_wait(rq, 0, HZ / 5) < 0) {
					struct drm_printer p =
						drm_info_printer(gt->i915->drm.dev);

					pr_err("i915_reset_engine(%s:%s):"
					       " failed to complete request %llx:%lld after reset\n",
					       engine->name, test_name,
					       rq->fence.context,
					       rq->fence.seqno);
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);
					i915_request_put(rq);

					GEM_TRACE_DUMP();
					intel_gt_set_wedged(gt);
					err = -EIO;
					break;
				}

				i915_request_put(rq);
			}

			if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
				struct drm_printer p =
					drm_info_printer(gt->i915->drm.dev);

				pr_err("i915_reset_engine(%s:%s):"
				       " failed to idle after reset\n",
				       engine->name, test_name);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				err = -EIO;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
		st_engine_heartbeat_enable(engine);

		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
			engine->name, test_name, count);

		reported = i915_reset_engine_count(global, engine);
		reported -= threads[engine->id].resets;
		if (reported != count) {
			pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
			       engine->name, test_name, count, reported);
			if (!err)
				err = -EINVAL;
		}

unwind:
		for_each_engine(other, gt, tmp) {
			int ret;

			if (!threads[tmp].task)
				continue;

			ret = kthread_stop(threads[tmp].task);
			if (ret) {
				pr_err("kthread for other engine %s failed, err=%d\n",
				       other->name, ret);
				if (!err)
					err = ret;
			}
			put_task_struct(threads[tmp].task);

			if (other->uabi_class != engine->uabi_class &&
			    threads[tmp].resets !=
			    i915_reset_engine_count(global, other)) {
				pr_err("Innocent engine %s was reset (count=%ld)\n",
				       other->name,
				       i915_reset_engine_count(global, other) -
				       threads[tmp].resets);
				if (!err)
					err = -EINVAL;
			}
		}

		if (device != i915_reset_count(global)) {
			pr_err("Global reset (count=%ld)!\n",
			       i915_reset_count(global) - device);
			if (!err)
				err = -EINVAL;
		}

		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	if (flags & TEST_ACTIVE)
		hang_fini(&h);

	return err;
}

static int igt_reset_engines(void *arg)
{
	static const struct {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "idle", 0 },
		{ "active", TEST_ACTIVE },
		{ "others-idle", TEST_OTHERS },
		{ "others-active", TEST_OTHERS | TEST_ACTIVE },
		{
			"others-priority",
			TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
		},
		{
			"self-priority",
			TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
		},
		{ }
	};
	struct intel_gt *gt = arg;
	typeof(*phases) *p;
	int err;

	for (p = phases; p->name; p++) {
		if (p->flags & TEST_PRIORITY) {
			if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
				continue;
		}

		err = __igt_reset_engines(arg, p->name, p->flags);
		if (err)
			return err;
	}

	return 0;
}

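/*
 * Stand-in for the hangcheck worker: force a GT reset directly and return
 * the global reset count sampled beforehand for the caller to compare.
 */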
static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
	u32 count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, mask, NULL);

	return count;
}

static int igt_reset_wait(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct i915_request *rq;
	unsigned int reset_count;
	struct hang h;
	long timeout;
	int err;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	/* Check that we detect a stuck waiter and issue a reset */

	igt_global_reset_lock(gt);

	err = hang_init(&h, gt);
	if (err)
		goto unlock;

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto fini;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);

		err = -EIO;
		goto out_rq;
	}

	reset_count = fake_hangcheck(gt, ALL_ENGINES);

	timeout = i915_request_wait(rq, 0, 10);
	if (timeout < 0) {
		pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
		       timeout);
		err = timeout;
		goto out_rq;
	}

	if (i915_reset_count(global) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
		goto out_rq;
	}

out_rq:
	i915_request_put(rq);
fini:
	hang_fini(&h);
unlock:
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}

struct evict_vma {
	struct completion completion;
	struct i915_vma *vma;
};

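/* From a second thread, try to evict the node kept busy by the hanging batch */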
static int evict_vma(void *data)
{
	struct evict_vma *arg = data;
	struct i915_address_space *vm = arg->vma->vm;
	struct drm_mm_node evict = arg->vma->node;
	int err;

	complete(&arg->completion);

	mutex_lock(&vm->mutex);
	err = i915_gem_evict_for_node(vm, &evict, 0);
	mutex_unlock(&vm->mutex);

	return err;
}

static int evict_fence(void *data)
{
	struct evict_vma *arg = data;
	int err;

	complete(&arg->completion);

	/* Mark the fence register as dirty to force the mmio update. */
	err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
	if (err) {
		pr_err("Invalid Y-tiling settings; err:%d\n", err);
		return err;
	}

	err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
	if (err) {
		pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
		return err;
	}

	err = i915_vma_pin_fence(arg->vma);
	i915_vma_unpin(arg->vma);
	if (err) {
		pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
		return err;
	}

	i915_vma_unpin_fence(arg->vma);

	return 0;
}

static int __igt_reset_evict_vma(struct intel_gt *gt,
				 struct i915_address_space *vm,
				 int (*fn)(void *),
				 unsigned int flags)
{
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct drm_i915_gem_object *obj;
	struct task_struct *tsk = NULL;
	struct i915_request *rq;
	struct evict_vma arg;
	struct hang h;
	unsigned int pin_flags;
	int err;

	if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
		return 0;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	/* Check that we can recover an unbind stuck on a hanging request */

	err = hang_init(&h, gt);
	if (err)
		return err;

	obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto fini;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
		if (err) {
			pr_err("Invalid X-tiling settings; err:%d\n", err);
			goto out_obj;
		}
	}

	arg.vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(arg.vma)) {
		err = PTR_ERR(arg.vma);
		goto out_obj;
	}

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_obj;
	}

	pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		pin_flags |= PIN_MAPPABLE;

	err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
	if (err) {
		i915_request_add(rq);
		goto out_obj;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		err = i915_vma_pin_fence(arg.vma);
		if (err) {
			pr_err("Unable to pin X-tiled fence; err:%d\n", err);
			i915_vma_unpin(arg.vma);
			i915_request_add(rq);
			goto out_obj;
		}
	}

	i915_vma_lock(arg.vma);
	err = i915_request_await_object(rq, arg.vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(arg.vma, rq, flags);
	i915_vma_unlock(arg.vma);

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_vma_unpin_fence(arg.vma);
	i915_vma_unpin(arg.vma);

	i915_request_get(rq);
	i915_request_add(rq);
	if (err)
		goto out_rq;

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);
		goto out_reset;
	}

	init_completion(&arg.completion);

	tsk = kthread_run(fn, &arg, "igt/evict_vma");
	if (IS_ERR(tsk)) {
		err = PTR_ERR(tsk);
		tsk = NULL;
		goto out_reset;
	}
	get_task_struct(tsk);

	wait_for_completion(&arg.completion);

	if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("igt/evict_vma kthread did not wait\n");
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);
		goto out_reset;
	}

out_reset:
	igt_global_reset_lock(gt);
	fake_hangcheck(gt, rq->engine->mask);
	igt_global_reset_unlock(gt);

	if (tsk) {
		struct intel_wedge_me w;

		/* The reset, even indirectly, should take less than 10ms. */
		intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
			err = kthread_stop(tsk);

		put_task_struct(tsk);
	}

out_rq:
	i915_request_put(rq);
out_obj:
	i915_gem_object_put(obj);
fini:
	hang_fini(&h);
	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}

static int igt_reset_evict_ggtt(void *arg)
{
	struct intel_gt *gt = arg;

	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
				     evict_vma, EXEC_OBJECT_WRITE);
}

static int igt_reset_evict_ppgtt(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ppgtt *ppgtt;
	int err;

	/* aliasing == global gtt locking, covered above */
	if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
		return 0;

	ppgtt = i915_ppgtt_create(gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	err = __igt_reset_evict_vma(gt, &ppgtt->vm,
				    evict_vma, EXEC_OBJECT_WRITE);
	i915_vm_put(&ppgtt->vm);

	return err;
}

static int igt_reset_evict_fence(void *arg)
{
	struct intel_gt *gt = arg;

	return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
				     evict_fence, EXEC_OBJECT_NEEDS_FENCE);
}

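/* Wait for every engine other than @exclude to settle back to idle */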
static int wait_for_others(struct intel_gt *gt,
			   struct intel_engine_cs *exclude)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine == exclude)
			continue;

		if (!wait_for_idle(engine))
			return -EIO;
	}

	return 0;
}

static int igt_reset_queue(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err;

	/* Check that we replay pending requests following a hang */

	igt_global_reset_lock(gt);

	err = hang_init(&h, gt);
	if (err)
		goto unlock;

	for_each_engine(engine, gt, id) {
		struct i915_request *prev;
		IGT_TIMEOUT(end_time);
		unsigned int count;

		if (!intel_engine_can_store_dword(engine))
			continue;

		prev = hang_create_request(&h, engine);
		if (IS_ERR(prev)) {
			err = PTR_ERR(prev);
			goto fini;
		}

		i915_request_get(prev);
		i915_request_add(prev);

		count = 0;
		do {
			struct i915_request *rq;
			unsigned int reset_count;

			rq = hang_create_request(&h, engine);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto fini;
			}

			i915_request_get(rq);
			i915_request_add(rq);

			/*
			 * XXX We don't handle resetting the kernel context
			 * very well. If we trigger a device reset twice in
			 * quick succession while the kernel context is
			 * executing, we may end up skipping the breadcrumb.
			 * This is really only a problem for the selftest as
			 * normally there is a large interlude between resets
			 * (hangcheck), or we focus on resetting just one
			 * engine and so avoid repeatedly resetting innocents.
			 */
			err = wait_for_others(gt, engine);
			if (err) {
				pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
				       __func__, engine->name);
				i915_request_put(rq);
				i915_request_put(prev);

				GEM_TRACE_DUMP();
				intel_gt_set_wedged(gt);
				goto fini;
			}

			if (!wait_until_running(&h, prev)) {
				struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

				pr_err("%s(%s): Failed to start request %llx, at %x\n",
				       __func__, engine->name,
				       prev->fence.seqno, hws_seqno(&h, prev));
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				i915_request_put(rq);
				i915_request_put(prev);

				intel_gt_set_wedged(gt);

				err = -EIO;
				goto fini;
			}

			reset_count = fake_hangcheck(gt, BIT(id));

			if (prev->fence.error != -EIO) {
				pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
				       prev->fence.error);
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto fini;
			}

			if (rq->fence.error) {
				pr_err("Fence error status not zero [%d] after unrelated reset\n",
				       rq->fence.error);
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto fini;
			}

			if (i915_reset_count(global) == reset_count) {
				pr_err("No GPU reset recorded!\n");
				i915_request_put(rq);
				i915_request_put(prev);
				err = -EINVAL;
				goto fini;
			}

			i915_request_put(prev);
			prev = rq;
			count++;
		} while (time_before(jiffies, end_time));
		pr_info("%s: Completed %d queued resets\n",
			engine->name, count);

		*h.batch = MI_BATCH_BUFFER_END;
		intel_gt_chipset_flush(engine->gt);

		i915_request_put(prev);

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

fini:
	hang_fini(&h);
unlock:
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		return -EIO;

	return err;
}

static int igt_handle_error(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	struct intel_engine_cs *engine = gt->engine[RCS0];
	struct hang h;
	struct i915_request *rq;
	struct i915_gpu_coredump *error;
	int err;

	/* Check that we can issue a global GPU and engine reset */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (!engine || !intel_engine_can_store_dword(engine))
		return 0;

	err = hang_init(&h, gt);
	if (err)
		return err;

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

		pr_err("%s: Failed to start request %llx, at %x\n",
		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);

		intel_gt_set_wedged(gt);

		err = -EIO;
		goto err_request;
	}

	/* Temporarily disable error capture */
	error = xchg(&global->first_error, (void *)-1);

	intel_gt_handle_error(gt, engine->mask, 0, NULL);

	xchg(&global->first_error, error);

	if (rq->fence.error != -EIO) {
		pr_err("Guilty request not identified!\n");
		err = -EINVAL;
		goto err_request;
	}

err_request:
	i915_request_put(rq);
err_fini:
	hang_fini(&h);
	return err;
}

static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
				     const struct igt_atomic_section *p,
				     const char *mode)
{
	struct tasklet_struct * const t = &engine->execlists.tasklet;
	int err;

	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
		  engine->name, mode, p->name);

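	/*
	 * Enter the requested atomic section (disabling the submission tasklet,
	 * and softirqs unless that is the section under test) and perform the
	 * engine reset from inside it.
	 */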
	if (t->func)
		tasklet_disable(t);
	if (strcmp(p->name, "softirq"))
		local_bh_disable();
	p->critical_section_begin();

	err = __intel_engine_reset_bh(engine, NULL);

	p->critical_section_end();
	if (strcmp(p->name, "softirq"))
		local_bh_enable();
	if (t->func) {
		tasklet_enable(t);
		tasklet_hi_schedule(t);
	}

	if (err)
		pr_err("i915_reset_engine(%s:%s) failed under %s\n",
		       engine->name, mode, p->name);

	return err;
}

static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
				   const struct igt_atomic_section *p)
{
	struct i915_request *rq;
	struct hang h;
	int err;

	err = __igt_atomic_reset_engine(engine, p, "idle");
	if (err)
		return err;

1745
	err = hang_init(&h, engine->gt);
1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763
	if (err)
		return err;

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (wait_until_running(&h, rq)) {
		err = __igt_atomic_reset_engine(engine, p, "active");
	} else {
		pr_err("%s(%s): Failed to start request %llx, at %x\n",
		       __func__, engine->name,
		       rq->fence.seqno, hws_seqno(&h, rq));
		intel_gt_set_wedged(engine->gt);
		err = -EIO;
	}

	if (err == 0) {
		struct intel_wedge_me w;

		intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		if (intel_gt_is_wedged(engine->gt))
			err = -EIO;
	}

	i915_request_put(rq);
out:
	hang_fini(&h);
	return err;
}

static int igt_reset_engines_atomic(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	int err = 0;

	/* Check that engine resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	for (p = igt_atomic_phases; p->name; p++) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine(engine, gt, id) {
			err = igt_atomic_reset_engine(engine, p);
			if (err)
				goto out;
		}
	}

out:
	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);
unlock:
	igt_global_reset_unlock(gt);

	return err;
}

int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_hang_sanitycheck),
		SUBTEST(igt_reset_nop),
		SUBTEST(igt_reset_nop_engine),
		SUBTEST(igt_reset_idle_engine),
		SUBTEST(igt_reset_active_engine),
		SUBTEST(igt_reset_fail_engine),
		SUBTEST(igt_reset_engines),
		SUBTEST(igt_reset_engines_atomic),
		SUBTEST(igt_reset_queue),
		SUBTEST(igt_reset_wait),
		SUBTEST(igt_reset_evict_ggtt),
		SUBTEST(igt_reset_evict_ppgtt),
		SUBTEST(igt_reset_evict_fence),
		SUBTEST(igt_handle_error),
	};
	struct intel_gt *gt = &i915->gt;
	intel_wakeref_t wakeref;
	int err;

	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = intel_gt_live_subtests(tests, gt);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);

	return err;
}