// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_bo.h"

#include <linux/dma-buf.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_preempt_fence.h"
#include "xe_res_cursor.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = XE_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

static const struct ttm_place tt_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = XE_PL_TT,
	.flags = 0,
};

static struct ttm_placement tt_placement = {
	.num_placement = 1,
	.placement = &tt_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

bool mem_type_is_vram(u32 mem_type)
{
	return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
}

static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
{
	return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
}

static bool resource_is_vram(struct ttm_resource *res)
{
	return mem_type_is_vram(res->mem_type);
}

bool xe_bo_is_vram(struct xe_bo *bo)
{
	return resource_is_vram(bo->ttm.resource) ||
		resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
}

bool xe_bo_is_stolen(struct xe_bo *bo)
{
	return bo->ttm.resource->mem_type == XE_PL_STOLEN;
}

/**
 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
 * @bo: The BO
 *
 * The stolen memory is accessed through the PCI BAR for both DGFX and some
 * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
 *
 * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
 */
bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
{
	return xe_bo_is_stolen(bo) &&
		GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
}

static bool xe_bo_is_user(struct xe_bo *bo)
{
	return bo->flags & XE_BO_CREATE_USER_BIT;
}

static struct xe_tile *
mem_type_to_tile(struct xe_device *xe, u32 mem_type)
{
	xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));

	return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
}

/**
 * xe_bo_to_tile() - Get a tile from a BO's memory location
 * @bo: The buffer object
 *
 * Get a tile from a BO's memory location; should only be called on BOs in VRAM.
 *
 * Return: xe_tile object which is closest to the BO
 */
struct xe_tile *xe_bo_to_tile(struct xe_bo *bo)
{
	return mem_type_to_tile(xe_bo_device(bo), bo->ttm.resource->mem_type);
}

static void try_add_system(struct xe_bo *bo, struct ttm_place *places,
			   u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
		places[*c] = (struct ttm_place) {
			.mem_type = XE_PL_TT,
		};
		*c += 1;

		if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
			bo->props.preferred_mem_type = XE_PL_TT;
	}
}

static void add_vram(struct xe_device *xe, struct xe_bo *bo,
		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
	struct xe_tile *tile = mem_type_to_tile(xe, mem_type);
	struct ttm_place place = { .mem_type = mem_type };
	u64 io_size = tile->mem.vram.io_size;

	xe_assert(xe, tile->mem.vram.usable_size);

	/*
	 * For eviction / restore on suspend / resume, objects
	 * pinned in VRAM must be contiguous
	 */
	if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
			XE_BO_CREATE_GGTT_BIT))
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;

	if (io_size < tile->mem.vram.usable_size) {
		if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
			place.fpfn = 0;
			place.lpfn = io_size >> PAGE_SHIFT;
		} else {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
	places[*c] = place;
	*c += 1;

	if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)
		bo->props.preferred_mem_type = mem_type;
}

static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
			 struct ttm_place *places, u32 bo_flags, u32 *c)
{
	if (bo->props.preferred_gt == XE_GT1) {
		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
			add_vram(xe, bo, places, bo_flags, XE_PL_VRAM1, c);
		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
			add_vram(xe, bo, places, bo_flags, XE_PL_VRAM0, c);
	} else {
		if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
			add_vram(xe, bo, places, bo_flags, XE_PL_VRAM0, c);
		if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
			add_vram(xe, bo, places, bo_flags, XE_PL_VRAM1, c);
	}
}

static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
			   struct ttm_place *places, u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
		places[*c] = (struct ttm_place) {
			.mem_type = XE_PL_STOLEN,
			.flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
					     XE_BO_CREATE_GGTT_BIT) ?
				TTM_PL_FLAG_CONTIGUOUS : 0,
		};
		*c += 1;
	}
}

static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
				       u32 bo_flags)
{
	struct ttm_place *places = bo->placements;
	u32 c = 0;

	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;

	/* The order of placements should indicate preferred location */

	if (bo->props.preferred_mem_class == XE_MEM_REGION_CLASS_SYSMEM) {
		try_add_system(bo, places, bo_flags, &c);
		try_add_vram(xe, bo, places, bo_flags, &c);
	} else {
		try_add_vram(xe, bo, places, bo_flags, &c);
		try_add_system(bo, places, bo_flags, &c);
	}
	try_add_stolen(xe, bo, places, bo_flags, &c);

	if (!c)
		return -EINVAL;

	bo->placement = (struct ttm_placement) {
		.num_placement = c,
		.placement = places,
		.num_busy_placement = c,
		.busy_placement = places,
	};

	return 0;
}

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags)
{
	xe_bo_assert_held(bo);
	return __xe_bo_placement_for_flags(xe, bo, bo_flags);
}
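
/*
 * Illustrative sketch (not part of the driver): for a user BO created with
 * XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT and a preferred memory
 * class other than XE_MEM_REGION_CLASS_SYSMEM, the helpers above build the
 * placement list as [VRAM0, TT], i.e. validation tries VRAM first and falls
 * back to system/TT:
 *
 *	err = xe_bo_placement_for_flags(xe, bo, XE_BO_CREATE_SYSTEM_BIT |
 *					XE_BO_CREATE_VRAM0_BIT);
 */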

static void xe_evict_flags(struct ttm_buffer_object *tbo,
			   struct ttm_placement *placement)
{
	struct xe_bo *bo;

	if (!xe_bo_is_xe_bo(tbo)) {
		/* Don't handle scatter gather BOs */
		if (tbo->type == ttm_bo_type_sg) {
			placement->num_placement = 0;
			placement->num_busy_placement = 0;
			return;
		}

		*placement = sys_placement;
		return;
	}

	/*
	 * For xe, sg bos that are evicted to system just triggers a
	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
	 */

	bo = ttm_to_xe_bo(tbo);
	switch (tbo->resource->mem_type) {
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
	case XE_PL_STOLEN:
		*placement = tt_placement;
		break;
	case XE_PL_TT:
	default:
		*placement = sys_placement;
		break;
	}
}

struct xe_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct sg_table sgt;
	struct sg_table *sg;
};

static int xe_tt_map_sg(struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	unsigned long num_pages = tt->num_pages;
	int ret;

	XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (xe_tt->sg)
		return 0;

	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
						num_pages, 0,
						(u64)num_pages << PAGE_SHIFT,
						xe_sg_segment_size(xe_tt->dev),
						GFP_KERNEL);
	if (ret)
		return ret;

	xe_tt->sg = &xe_tt->sgt;
	ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
		return ret;
	}

	return 0;
}

struct sg_table *xe_bo_get_sg(struct xe_bo *bo)
{
	struct ttm_tt *tt = bo->ttm.ttm;
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	return xe_tt->sg;
}

static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
				       u32 page_flags)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ttm_tt *tt;
	unsigned long extra_pages;
	enum ttm_caching caching = ttm_cached;
	int err;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->dev = xe->drm.dev;

	extra_pages = 0;
	if (xe_bo_needs_ccs_pages(bo))
		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size),
					   PAGE_SIZE);

	/*
	 * Display scanout is always non-coherent with the CPU cache.
	 *
	 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
	 * require a CPU:WC mapping.
	 */
	if (bo->flags & XE_BO_SCANOUT_BIT ||
	    (xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
		caching = ttm_write_combined;

	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
	if (err) {
		kfree(tt);
		return NULL;
	}

	return &tt->ttm;
}

static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
			      struct ttm_operation_ctx *ctx)
{
	int err;

	/*
	 * dma-bufs are not populated with pages, and the dma-
	 * addresses are set up when moved to XE_PL_TT.
	 */
	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
	if (err)
		return err;

	/* A follow-up may move this to xe_bo_move() when the BO is moved to XE_PL_TT */
	err = xe_tt_map_sg(tt);
	if (err)
		ttm_pool_free(&ttm_dev->pool, tt);

	return err;
}

static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	if (xe_tt->sg) {
		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
	}

	return ttm_pool_free(&ttm_dev->pool, tt);
}

static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
				 struct ttm_resource *mem)
{
	struct xe_device *xe = ttm_to_xe_device(bdev);

	switch (mem->mem_type) {
	case XE_PL_SYSTEM:
	case XE_PL_TT:
		return 0;
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct xe_tile *tile = mem_type_to_tile(xe, mem->mem_type);
		struct xe_ttm_vram_mgr_resource *vres =
			to_xe_ttm_vram_mgr_resource(mem);

		if (vres->used_visible_size < mem->size)
			return -EINVAL;

		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (tile->mem.vram.mapping &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 *)tile->mem.vram.mapping +
				mem->bus.offset;

		mem->bus.offset += tile->mem.vram.io_start;
		mem->bus.is_iomem = true;

#if  !defined(CONFIG_X86)
		mem->bus.caching = ttm_write_combined;
#endif
		return 0;
	} case XE_PL_STOLEN:
		return xe_ttm_stolen_io_mem_reserve(xe, mem);
	default:
		return -EINVAL;
	}
}

static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
				const struct ttm_operation_ctx *ctx)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct drm_gpuva *gpuva;
	struct drm_gem_object *obj = &bo->ttm.base;
	struct drm_gpuvm_bo *vm_bo;
	int ret = 0;

	dma_resv_assert_held(bo->ttm.base.resv);

	if (!list_empty(&bo->ttm.base.gpuva.list)) {
		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
				    DMA_RESV_USAGE_BOOKKEEP);
		dma_resv_for_each_fence_unlocked(&cursor, fence)
			dma_fence_enable_sw_signaling(fence);
		dma_resv_iter_end(&cursor);
	}

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
			struct xe_vma *vma = gpuva_to_vma(gpuva);
			struct xe_vm *vm = xe_vma_vm(vma);

			trace_xe_vma_evict(vma);

		if (xe_vm_in_fault_mode(vm)) {
			/* Wait for pending binds / unbinds. */
			long timeout;

			if (ctx->no_wait_gpu &&
			    !dma_resv_test_signaled(bo->ttm.base.resv,
						    DMA_RESV_USAGE_BOOKKEEP))
				return -EBUSY;

			timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
							DMA_RESV_USAGE_BOOKKEEP,
							ctx->interruptible,
							MAX_SCHEDULE_TIMEOUT);
			if (timeout > 0) {
				ret = xe_vm_invalidate_vma(vma);
				XE_WARN_ON(ret);
			} else if (!timeout) {
				ret = -ETIME;
			} else {
				ret = timeout;
			}

		} else {
			bool vm_resv_locked = false;

			/*
			 * We need to put the vma on the vm's rebind_list,
			 * but need the vm resv to do so. If we can't verify
			 * that we indeed have it locked, put the vma on the
			 * vm's notifier.rebind_list instead and scoop later.
			 */
			if (dma_resv_trylock(xe_vm_resv(vm)))
				vm_resv_locked = true;
			else if (ctx->resv != xe_vm_resv(vm)) {
				spin_lock(&vm->notifier.list_lock);
				if (!(vma->gpuva.flags & XE_VMA_DESTROYED))
					list_move_tail(&vma->notifier.rebind_link,
						       &vm->notifier.rebind_list);
				spin_unlock(&vm->notifier.list_lock);
				continue;
			}

			xe_vm_assert_held(vm);
			if (vma->tile_present &&
			    !(vma->gpuva.flags & XE_VMA_DESTROYED) &&
			    list_empty(&vma->combined_links.rebind))
				list_add_tail(&vma->combined_links.rebind,
					      &vm->rebind_list);

			if (vm_resv_locked)
				dma_resv_unlock(xe_vm_resv(vm));
		}
		}
	}

	return ret;
}

/*
 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
 * Note that unmapping the attachment is deferred to the next
 * map_attachment time, or to bo destroy (after idling) whichever comes first.
 * This is to avoid syncing before unmap_attachment(), assuming that the
 * caller relies on idling the reservation object before moving the
 * backing store out. Should that assumption not hold, then we will be able
 * to unconditionally call unmap_attachment() when moving out to system.
 */
static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
			     struct ttm_resource *new_res)
{
	struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
	struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
					       ttm);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct sg_table *sg;

	xe_assert(xe, attach);
	xe_assert(xe, ttm_bo->ttm);

	if (new_res->mem_type == XE_PL_SYSTEM)
		goto out;

	if (ttm_bo->sg) {
		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
	}

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	ttm_bo->sg = sg;
	xe_tt->sg = sg;

out:
	ttm_bo_move_null(ttm_bo, new_res);

	return 0;
}

/**
 * xe_bo_move_notify - Notify subsystems of a pending move
 * @bo: The buffer object
 * @ctx: The struct ttm_operation_ctx controlling locking and waits.
 *
 * This function notifies subsystems of an upcoming buffer move.
 * Upon receiving such a notification, subsystems should schedule
 * halting access to the underlying pages and optionally add a fence
 * to the buffer object's dma_resv object, that signals when access is
 * stopped. The caller will wait on all dma_resv fences before
 * starting the move.
 *
 * A subsystem may commence access to the object after obtaining
 * bindings to the new backing memory under the object lock.
 *
 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
 * negative error code on error.
 */
static int xe_bo_move_notify(struct xe_bo *bo,
			     const struct ttm_operation_ctx *ctx)
{
	struct ttm_buffer_object *ttm_bo = &bo->ttm;
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	int ret;

	/*
	 * If this starts to call into many components, consider
	 * using a notification chain here.
	 */

	if (xe_bo_is_pinned(bo))
		return -EINVAL;

	xe_bo_vunmap(bo);
	ret = xe_bo_trigger_rebind(xe, bo, ctx);
	if (ret)
		return ret;

	/* Don't call move_notify() for imported dma-bufs. */
	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
		dma_buf_move_notify(ttm_bo->base.dma_buf);

	return 0;
}
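
/*
 * Overview of the move paths handled by xe_bo_move() below:
 * - BO creation (no old resource) and SYSTEM -> TT transitions are "null"
 *   moves with no data transfer; TT -> SYSTEM only waits for outstanding
 *   fences before doing a null move.
 * - dma-buf (sg) BOs are handed off to xe_bo_move_dmabuf().
 * - Direct VRAM <-> SYSTEM moves are split into two hops through XE_PL_TT by
 *   returning -EMULTIHOP.
 * - Pinned kernel BOs are moved with a CPU memcpy so they can be restored to
 *   the same physical VRAM address on resume.
 * - Everything else is cleared or copied using the tile's migrate engine.
 */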

static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct ttm_resource *old_mem = ttm_bo->resource;
	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
	struct ttm_tt *ttm = ttm_bo->ttm;
	struct xe_tile *tile = NULL;
	struct dma_fence *fence;
	bool move_lacks_source;
	bool tt_has_data;
	bool needs_clear;
	int ret = 0;

	/* Bo creation path, moving to system or TT. No clearing required. */
	if (!old_mem && ttm) {
		ttm_bo_move_null(ttm_bo, new_mem);
		return 0;
	}

	if (ttm_bo->type == ttm_bo_type_sg) {
		ret = xe_bo_move_notify(bo, ctx);
		if (!ret)
			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
		goto out;
	}

	tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
			      (ttm->page_flags & TTM_TT_FLAG_SWAPPED));

	move_lacks_source = !mem_type_is_vram(old_mem_type) && !tt_has_data;

	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
		(!ttm && ttm_bo->type == ttm_bo_type_device);

	if ((move_lacks_source && !needs_clear) ||
	    (old_mem_type == XE_PL_SYSTEM &&
	     new_mem->mem_type == XE_PL_TT)) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	/*
	 * Failed multi-hop where the old_mem is still marked as
	 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
	 */
	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_TT) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
		ret = xe_bo_move_notify(bo, ctx);
		if (ret)
			goto out;
	}

	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_SYSTEM) {
		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
						     DMA_RESV_USAGE_BOOKKEEP,
						     true,
						     MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			ret = timeout;
			goto out;
		}
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (!move_lacks_source &&
	    ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
	     (mem_type_is_vram(old_mem_type) &&
	      new_mem->mem_type == XE_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = XE_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		ret = -EMULTIHOP;
		goto out;
	}

	if (bo->tile)
		tile = bo->tile;
	else if (resource_is_vram(new_mem))
		tile = mem_type_to_tile(xe, new_mem->mem_type);
	else if (mem_type_is_vram(old_mem_type))
		tile = mem_type_to_tile(xe, old_mem_type);

	xe_assert(xe, tile);
	xe_tile_assert(tile, tile->migrate);

	trace_xe_bo_move(bo);
	xe_device_mem_access_get(xe);

	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
		/*
		 * Kernel memory that is pinned should only be moved on suspend
		 * / resume; some of the pinned memory is required for the
		 * device to resume / use the GPU to move other evicted memory
		 * (user memory) around. This could likely be optimized further
		 * by finding the minimum set of pinned memory required for
		 * resume, but for simplicity we do a memcpy for all pinned
		 * memory.
		 */
		ret = xe_bo_vmap(bo);
		if (!ret) {
			ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);

			/* Create a new VMAP once the kernel BO is back in VRAM */
			if (!ret && resource_is_vram(new_mem)) {
				void *new_addr = tile->mem.vram.mapping +
					(new_mem->start << PAGE_SHIFT);

				if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
					ret = -EINVAL;
					xe_device_mem_access_put(xe);
					goto out;
				}

				xe_assert(xe, new_mem->start ==
					  bo->placements->fpfn);

				iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
			}
		}
	} else {
		if (move_lacks_source)
			fence = xe_migrate_clear(tile->migrate, bo, new_mem);
		else
			fence = xe_migrate_copy(tile->migrate,
						bo, bo, old_mem, new_mem);
		if (IS_ERR(fence)) {
			ret = PTR_ERR(fence);
			xe_device_mem_access_put(xe);
			goto out;
		}
		if (!move_lacks_source) {
			ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
							true, new_mem);
			if (ret) {
				dma_fence_wait(fence, false);
				ttm_bo_move_null(ttm_bo, new_mem);
				ret = 0;
			}
		} else {
			/*
			 * ttm_bo_move_accel_cleanup() may blow up if
			 * bo->resource == NULL, so just attach the
			 * fence and set the new resource.
			 */
			dma_resv_add_fence(ttm_bo->base.resv, fence,
					   DMA_RESV_USAGE_KERNEL);
			ttm_bo_move_null(ttm_bo, new_mem);
		}

		dma_fence_put(fence);
	}

	xe_device_mem_access_put(xe);
	trace_printk("new_mem->mem_type=%d\n", new_mem->mem_type);

out:
	return ret;

}

/**
 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved to system memory.
 * This function blocks until the object has been fully moved.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict_pinned(struct xe_bo *bo)
{
	struct ttm_place place = {
		.mem_type = XE_PL_TT,
	};
	struct ttm_placement placement = {
		.placement = &place,
		.num_placement = 1,
	};
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
	};
	struct ttm_resource *new_mem;
	int ret;

	xe_bo_assert_held(bo);

	if (WARN_ON(!bo->ttm.resource))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_pinned(bo)))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_vram(bo)))
		return -EINVAL;

	ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
	if (ret)
		return ret;

	if (!bo->ttm.ttm) {
		bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
		if (!bo->ttm.ttm) {
			ret = -ENOMEM;
			goto err_res_free;
		}
	}

	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
	if (ret)
		goto err_res_free;

	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
	if (ret)
		goto err_res_free;

	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
	if (ret)
		goto err_res_free;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;

err_res_free:
	ttm_resource_free(&bo->ttm, &new_mem);
	return ret;
}

/**
 * xe_bo_restore_pinned() - Restore a pinned VRAM object
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved back to VRAM.
 * This function blocks until the object has been fully moved.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_restore_pinned(struct xe_bo *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
	};
	struct ttm_resource *new_mem;
	int ret;

	xe_bo_assert_held(bo);

	if (WARN_ON(!bo->ttm.resource))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_pinned(bo)))
		return -EINVAL;

	if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
		return -EINVAL;

	ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
	if (ret)
		return ret;

	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
	if (ret)
		goto err_res_free;

	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
	if (ret)
		goto err_res_free;

	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
	if (ret)
		goto err_res_free;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;

err_res_free:
	ttm_resource_free(&bo->ttm, &new_mem);
	return ret;
}
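
/*
 * Minimal usage sketch for the two helpers above (illustrative; the actual
 * suspend / resume callers are assumed to follow this shape):
 *
 *	xe_bo_lock(bo, false);
 *	ret = xe_bo_evict_pinned(bo);	// suspend: pinned VRAM BO -> TT
 *	xe_bo_unlock(bo);
 *	...
 *	xe_bo_lock(bo, false);
 *	ret = xe_bo_restore_pinned(bo);	// resume: move it back to VRAM
 *	xe_bo_unlock(bo);
 */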

static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
				       unsigned long page_offset)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_tile *tile = mem_type_to_tile(xe, ttm_bo->resource->mem_type);
	struct xe_res_cursor cursor;

	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;

	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
	return (tile->mem.vram.io_start + cursor.start) >> PAGE_SHIFT;
}

static void __xe_bo_vunmap(struct xe_bo *bo);

/*
 * TODO: Move this function to TTM so we don't rely on how TTM does its
 * locking, thereby abusing TTM internals.
 */
static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	bool locked;

	xe_assert(xe, !kref_read(&ttm_bo->kref));

	/*
	 * We can typically only race with TTM trylocking under the
	 * lru_lock, which will immediately be unlocked again since
	 * the ttm_bo refcount is zero at this point. So trylocking *should*
	 * always succeed here, as long as we hold the lru lock.
	 */
	spin_lock(&ttm_bo->bdev->lru_lock);
	locked = dma_resv_trylock(ttm_bo->base.resv);
	spin_unlock(&ttm_bo->bdev->lru_lock);
	xe_assert(xe, locked);

	return locked;
}

static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct dma_fence *replacement = NULL;
	struct xe_bo *bo;

	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	bo = ttm_to_xe_bo(ttm_bo);
	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));

	/*
	 * Corner case where TTM fails to allocate memory and this BO's resv
	 * still points to the VM's resv.
	 */
	if (ttm_bo->base.resv != &ttm_bo->base._resv)
		return;

	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
		return;

	/*
	 * Scrub the preempt fences if any. The unbind fence is already
	 * attached to the resv.
	 * TODO: Don't do this for external bos once we scrub them after
	 * unbind.
	 */
	dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, fence) {
		if (xe_fence_is_xe_preempt(fence) &&
		    !dma_fence_is_signaled(fence)) {
			if (!replacement)
				replacement = dma_fence_get_stub();

			dma_resv_replace_fences(ttm_bo->base.resv,
						fence->context,
						replacement,
						DMA_RESV_USAGE_BOOKKEEP);
		}
	}
	dma_fence_put(replacement);

	dma_resv_unlock(ttm_bo->base.resv);
}

static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
{
	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	/*
	 * Object is idle and about to be destroyed. Release the
	 * dma-buf attachment.
	 */
	if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
		struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
						       struct xe_ttm_tt, ttm);

		dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
					 DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
		xe_tt->sg = NULL;
	}
}

struct ttm_device_funcs xe_ttm_funcs = {
	.ttm_tt_create = xe_ttm_tt_create,
	.ttm_tt_populate = xe_ttm_tt_populate,
	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
	.ttm_tt_destroy = xe_ttm_tt_destroy,
	.evict_flags = xe_evict_flags,
	.move = xe_bo_move,
	.io_mem_reserve = xe_ttm_io_mem_reserve,
	.io_mem_pfn = xe_ttm_io_mem_pfn,
	.release_notify = xe_ttm_bo_release_notify,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
};

static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);

	if (bo->ttm.base.import_attach)
		drm_prime_gem_destroy(&bo->ttm.base, NULL);
	drm_gem_object_release(&bo->ttm.base);

	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));

	if (bo->ggtt_node.size)
		xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);

#ifdef CONFIG_PROC_FS
	if (bo->client)
		xe_drm_client_remove_bo(bo);
#endif

	if (bo->vm && xe_bo_is_user(bo))
		xe_vm_put(bo->vm);

	kfree(bo);
}

static void xe_gem_object_free(struct drm_gem_object *obj)
{
	/* Our BO reference counting scheme works as follows:
	 *
	 * The gem object kref is typically used throughout the driver,
	 * and the gem object holds a ttm_buffer_object refcount, so
	 * that when the last gem object reference is put, which is when
	 * we end up in this function, we put also that ttm_buffer_object
	 * refcount. Anything using gem interfaces is then no longer
	 * allowed to access the object in a way that requires a gem
	 * refcount, including locking the object.
	 *
	 * Driver TTM callbacks are allowed to use the ttm_buffer_object
	 * refcount directly if needed.
	 */
	__xe_bo_vunmap(gem_to_xe_bo(obj));
	ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
}

static void xe_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
		xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));

		xe_bo_lock(bo, false);
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
		xe_bo_unlock(bo);
	}
}

static bool should_migrate_to_system(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;
}

static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
	struct drm_device *ddev = tbo->base.dev;
	vm_fault_t ret;
	int idx, r = 0;

	ret = ttm_bo_vm_reserve(tbo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(ddev, &idx)) {
		struct xe_bo *bo = ttm_to_xe_bo(tbo);

		trace_xe_bo_cpu_fault(bo);

		if (should_migrate_to_system(bo)) {
			r = xe_bo_migrate(bo, XE_PL_TT);
			if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)
				ret = VM_FAULT_NOPAGE;
			else if (r)
				ret = VM_FAULT_SIGBUS;
		}
		if (!ret)
			ret = ttm_bo_vm_fault_reserved(vmf,
						       vmf->vma->vm_page_prot,
						       TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(tbo->base.resv);
	return ret;
}

static const struct vm_operations_struct xe_gem_vm_ops = {
	.fault = xe_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static const struct drm_gem_object_funcs xe_gem_object_funcs = {
	.free = xe_gem_object_free,
	.close = xe_gem_object_close,
	.mmap = drm_gem_ttm_mmap,
	.export = xe_gem_prime_export,
	.vm_ops = &xe_gem_vm_ops,
};

/**
 * xe_bo_alloc - Allocate storage for a struct xe_bo
 *
 * This function is intended to allocate storage to be used as input
 * to __xe_bo_create_locked(), in the case a pointer to the bo to be
 * created is needed before the call to __xe_bo_create_locked().
 * If __xe_bo_create_locked() ends up never being called, then the
 * storage allocated with this function needs to be freed using
 * xe_bo_free().
 *
 * Return: A pointer to an uninitialized struct xe_bo on success,
 * ERR_PTR(-ENOMEM) on error.
 */
struct xe_bo *xe_bo_alloc(void)
{
	struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	return bo;
}

/**
 * xe_bo_free - Free storage allocated using xe_bo_alloc()
 * @bo: The buffer object storage.
 *
 * Refer to xe_bo_alloc() documentation for valid use-cases.
 */
void xe_bo_free(struct xe_bo *bo)
{
	kfree(bo);
}
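
/*
 * Sketch of the intended usage pattern for xe_bo_alloc() / xe_bo_free()
 * (illustrative only; setup_that_needs_bo_early() is a hypothetical helper,
 * compare xe_bo_create_locked_range() below for a real user):
 *
 *	struct xe_bo *bo = xe_bo_alloc();
 *
 *	if (IS_ERR(bo))
 *		return bo;
 *
 *	err = setup_that_needs_bo_early(bo);
 *	if (err) {
 *		xe_bo_free(bo);		// __xe_bo_create_locked() never ran
 *		return ERR_PTR(err);
 *	}
 *
 *	bo = __xe_bo_create_locked(xe, bo, tile, resv, bulk, size, type, flags);
 */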

struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
				    struct xe_tile *tile, struct dma_resv *resv,
				    struct ttm_lru_bulk_move *bulk, size_t size,
				    enum ttm_bo_type type, u32 flags)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement *placement;
	uint32_t alignment;
	int err;

	/* Only kernel objects should set GT */
	xe_assert(xe, !tile || type == ttm_bo_type_kernel);

	if (XE_WARN_ON(!size)) {
		xe_bo_free(bo);
		return ERR_PTR(-EINVAL);
	}

	if (!bo) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;
	}

	if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
	    !(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
		size = ALIGN(size, SZ_64K);
		flags |= XE_BO_INTERNAL_64K;
		alignment = SZ_64K >> PAGE_SHIFT;
	} else {
		alignment = SZ_4K >> PAGE_SHIFT;
	}

	bo->tile = tile;
	bo->size = size;
	bo->flags = flags;
	bo->ttm.base.funcs = &xe_gem_object_funcs;
	bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;
	bo->props.preferred_gt = XE_BO_PROPS_INVALID;
	bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;
	bo->ttm.priority = DRM_XE_VMA_PRIORITY_NORMAL;
	INIT_LIST_HEAD(&bo->pinned_link);
#ifdef CONFIG_PROC_FS
	INIT_LIST_HEAD(&bo->client_link);
#endif

	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);

	if (resv) {
		ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
		ctx.resv = resv;
	}

	if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
		err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
		if (WARN_ON(err)) {
			xe_ttm_bo_destroy(&bo->ttm);
			return ERR_PTR(err);
		}
	}

	/* Defer populating type_sg bos */
	placement = (type == ttm_bo_type_sg ||
		     bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
		&bo->placement;
	err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
				   placement, alignment,
				   &ctx, NULL, resv, xe_ttm_bo_destroy);
	if (err)
		return ERR_PTR(err);

	bo->created = true;
	if (bulk)
		ttm_bo_set_bulk_move(&bo->ttm, bulk);
	else
		ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return bo;
}

static int __xe_bo_fixed_placement(struct xe_device *xe,
				   struct xe_bo *bo,
				   u32 flags,
				   u64 start, u64 end, u64 size)
{
	struct ttm_place *place = bo->placements;

	if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
		return -EINVAL;

	place->flags = TTM_PL_FLAG_CONTIGUOUS;
	place->fpfn = start >> PAGE_SHIFT;
	place->lpfn = end >> PAGE_SHIFT;

	switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
	case XE_BO_CREATE_VRAM0_BIT:
		place->mem_type = XE_PL_VRAM0;
		break;
	case XE_BO_CREATE_VRAM1_BIT:
		place->mem_type = XE_PL_VRAM1;
		break;
	case XE_BO_CREATE_STOLEN_BIT:
		place->mem_type = XE_PL_STOLEN;
		break;

	default:
		/* 0 or multiple of the above set */
		return -EINVAL;
	}

	bo->placement = (struct ttm_placement) {
		.num_placement = 1,
		.placement = place,
		.num_busy_placement = 1,
		.busy_placement = place,
	};

	return 0;
}

struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
			  struct xe_tile *tile, struct xe_vm *vm,
			  size_t size, u64 start, u64 end,
			  enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = NULL;
	int err;

	if (vm)
		xe_vm_assert_held(vm);

	if (start || end != ~0ULL) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;

		flags |= XE_BO_FIXED_PLACEMENT_BIT;
		err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
		if (err) {
			xe_bo_free(bo);
			return ERR_PTR(err);
		}
	}

	bo = __xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
				   vm && !xe_vm_in_fault_mode(vm) &&
				   flags & XE_BO_CREATE_USER_BIT ?
				   &vm->lru_bulk_move : NULL, size,
				   type, flags);
	if (IS_ERR(bo))
		return bo;

	/*
	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
	 * to ensure the shared resv doesn't disappear under the bo, the bo
	 * will keep a reference to the vm, and avoid circular references
	 * by having all the vm's bo references released at vm close
	 * time.
	 */
	if (vm && xe_bo_is_user(bo))
		xe_vm_get(vm);
	bo->vm = vm;

	if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
		if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
			tile = xe_device_get_root_tile(xe);

		xe_assert(xe, tile);

		if (flags & XE_BO_CREATE_STOLEN_BIT &&
		    flags & XE_BO_FIXED_PLACEMENT_BIT) {
			err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, start);
		} else {
			err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
		}
		if (err)
			goto err_unlock_put_bo;
	}

	return bo;

err_unlock_put_bo:
	__xe_bo_unset_bulk_move(bo);
	xe_bo_unlock_vm_held(bo);
	xe_bo_put(bo);
	return ERR_PTR(err);
}

struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags)
{
	return xe_bo_create_locked_range(xe, tile, vm, size, 0, ~0ULL, type, flags);
}

struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
			   struct xe_vm *vm, size_t size,
			   enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);

	if (!IS_ERR(bo))
		xe_bo_unlock_vm_held(bo);

	return bo;
}

struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
				      struct xe_vm *vm,
				      size_t size, u64 offset,
				      enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo;
	int err;
	u64 start = offset == ~0ull ? 0 : offset;
	u64 end = offset == ~0ull ? offset : start + size;

	if (flags & XE_BO_CREATE_STOLEN_BIT &&
	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
		flags |= XE_BO_CREATE_GGTT_BIT;

	bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
				       flags | XE_BO_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo))
		return bo;

	err = xe_bo_pin(bo);
	if (err)
		goto err_put;

	err = xe_bo_vmap(bo);
	if (err)
		goto err_unpin;

	xe_bo_unlock_vm_held(bo);

	return bo;

err_unpin:
	xe_bo_unpin(bo);
err_put:
	xe_bo_unlock_vm_held(bo);
	xe_bo_put(bo);
	return ERR_PTR(err);
}

struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags)
{
	return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
}

struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
				     const void *data, size_t size,
				     enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
						ALIGN(size, PAGE_SIZE),
						type, flags);
	if (IS_ERR(bo))
		return bo;

	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);

	return bo;
}

/*
 * XXX: This is in the VM bind data path, likely should calculate this once and
 * store, with a recalculation if the BO is moved.
 */
uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
	struct xe_tile *tile = mem_type_to_tile(xe, res->mem_type);

	if (res->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_gpu_offset(xe);

	return tile->mem.vram.dpa_base;
}

/**
 * xe_bo_pin_external - pin an external BO
 * @bo: buffer object to be pinned
 *
 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_pin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_bo_pin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_user(bo));

	if (!xe_bo_is_pinned(bo)) {
		err = xe_bo_validate(bo, NULL, false);
		if (err)
			return err;

		if (xe_bo_is_vram(bo)) {
			spin_lock(&xe->pinned.lock);
			list_add_tail(&bo->pinned_link,
				      &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_pin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}
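
/*
 * Usage note (an assumption based on the asserts above, not taken from the
 * dma-buf code itself): xe_bo_pin_external() is intended for dma-buf exported
 * BOs, e.g. from an exporter pin hook, and the BO's dma_resv must be held:
 *
 *	xe_bo_lock(bo, false);
 *	err = xe_bo_pin_external(bo);
 *	xe_bo_unlock(bo);
 */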

int xe_bo_pin(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	/* We currently don't expect user BO to be pinned */
	xe_assert(xe, !xe_bo_is_user(bo));

	/* Pinned object must be in GGTT or have pinned flag */
	xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
				   XE_BO_CREATE_GGTT_BIT));

	/*
	 * No reason we can't support pinning imported dma-bufs we just don't
	 * expect to pin an imported dma-buf.
	 */
	xe_assert(xe, !bo->ttm.base.import_attach);

	/* We only expect at most 1 pin */
	xe_assert(xe, !xe_bo_is_pinned(bo));

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	/*
	 * For pinned objects on DGFX, which are also in VRAM, we expect
	 * these to be in contiguous VRAM memory. This requires eviction /
	 * restore during suspend / resume (force restore to the same
	 * physical address).
	 */
	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
	    bo->flags & XE_BO_INTERNAL_TEST)) {
		struct ttm_place *place = &(bo->placements[0]);

		if (mem_type_is_vram(place->mem_type)) {
			xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);

			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);

			spin_lock(&xe->pinned.lock);
			list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_pin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

/**
 * xe_bo_unpin_external - unpin an external BO
 * @bo: buffer object to be unpinned
 *
 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_unpin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 */
void xe_bo_unpin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_pinned(bo));
	xe_assert(xe, xe_bo_is_user(bo));

	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
		spin_lock(&xe->pinned.lock);
		list_del_init(&bo->pinned_link);
		spin_unlock(&xe->pinned.lock);
	}

	ttm_bo_unpin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
}

void xe_bo_unpin(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->ttm.base.import_attach);
	xe_assert(xe, xe_bo_is_pinned(bo));

	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
	    bo->flags & XE_BO_INTERNAL_TEST)) {
		struct ttm_place *place = &(bo->placements[0]);

		if (mem_type_is_vram(place->mem_type)) {
			xe_assert(xe, !list_empty(&bo->pinned_link));

			spin_lock(&xe->pinned.lock);
			list_del_init(&bo->pinned_link);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_unpin(&bo->ttm);
}

/**
 * xe_bo_validate() - Make sure the bo is in an allowed placement
 * @bo: The bo,
 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
 *      NULL. Used together with @allow_res_evict.
 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
 *                   reservation object.
 *
 * Make sure the bo is in allowed placement, migrating it if necessary. If
 * needed, other bos will be evicted. If bos selected for eviction share
 * the @vm's reservation object, they can be evicted iff @allow_res_evict is
 * set to true, otherwise they will be bypassed.
 *
 * Return: 0 on success, negative error code on failure. May return
 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
 */
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};

	if (vm) {
		lockdep_assert_held(&vm->lock);
		xe_vm_assert_held(vm);

		ctx.allow_res_evict = allow_res_evict;
		ctx.resv = xe_vm_resv(vm);
	}

	return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
}

bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &xe_ttm_bo_destroy)
		return true;

	return false;
}

/*
 * Resolve a BO address. There is no assert to check if the proper lock is held
 * so it should only be used in cases where it is not fatal to get the wrong
 * address, such as printing debug information, but not in cases where memory is
 * written based on this result.
 */
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_res_cursor cur;
	u64 page;

	xe_assert(xe, page_size <= PAGE_SIZE);
	page = offset >> PAGE_SHIFT;
	offset &= (PAGE_SIZE - 1);

	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
		xe_assert(xe, bo->ttm.ttm);

		xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
				page_size, &cur);
		return xe_res_dma(&cur) + offset;
	} else {
		struct xe_res_cursor cur;

		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
			     page_size, &cur);
		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
	}
}

dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{
	if (!READ_ONCE(bo->ttm.pin_count))
		xe_bo_assert_held(bo);
	return __xe_bo_addr(bo, offset, page_size);
}

int xe_bo_vmap(struct xe_bo *bo)
{
	void *virtual;
	bool is_iomem;
	int ret;

	xe_bo_assert_held(bo);

	if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
		return -EINVAL;

	if (!iosys_map_is_null(&bo->vmap))
		return 0;

	/*
	 * We use this more or less deprecated interface for now since
	 * ttm_bo_vmap() doesn't offer the optimization of kmapping
	 * single page bos, which is done here.
	 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
	 * to use struct iosys_map.
	 */
	ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (is_iomem)
		iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
	else
		iosys_map_set_vaddr(&bo->vmap, virtual);

	return 0;
}
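
/*
 * Typical vmap usage sketch (illustrative; xe_bo_create_from_data() above
 * relies on the pin_map variant of this pattern):
 *
 *	xe_bo_lock(bo, false);
 *	err = xe_bo_vmap(bo);
 *	if (!err) {
 *		xe_map_memcpy_to(xe, &bo->vmap, 0, data, len);
 *		xe_bo_vunmap(bo);
 *	}
 *	xe_bo_unlock(bo);
 */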

static void __xe_bo_vunmap(struct xe_bo *bo)
{
	if (!iosys_map_is_null(&bo->vmap)) {
		iosys_map_clear(&bo->vmap);
		ttm_bo_kunmap(&bo->kmap);
	}
}

void xe_bo_vunmap(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);
	__xe_bo_vunmap(bo);
}

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_gem_create *args = data;
	struct xe_vm *vm = NULL;
	struct xe_bo *bo;
	unsigned int bo_flags = XE_BO_CREATE_USER_BIT;
	u32 handle;
	int err;

	if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags &
			 ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
			   XE_GEM_CREATE_FLAG_SCANOUT |
			   XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM |
			   xe->info.mem_region_mask)))
		return -EINVAL;

	/* at least one memory type must be specified */
	if (XE_IOCTL_DBG(xe, !(args->flags & xe->info.mem_region_mask)))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->handle))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !args->size))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
		return -EINVAL;

	if (args->flags & XE_GEM_CREATE_FLAG_DEFER_BACKING)
		bo_flags |= XE_BO_DEFER_BACKING;

	if (args->flags & XE_GEM_CREATE_FLAG_SCANOUT)
		bo_flags |= XE_BO_SCANOUT_BIT;

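	/*
	 * The remaining args->flags bits are the uAPI memory region mask
	 * (validated against xe->info.mem_region_mask above); shift them so
	 * that region bit 0 lands on XE_BO_CREATE_SYSTEM_BIT and the VRAM
	 * region bits land on the corresponding XE_BO_CREATE_VRAMn_BIT flags.
	 */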
	bo_flags |= args->flags << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);

	if (args->flags & XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
			return -EINVAL;

		bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
	}

	if (args->vm_id) {
		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;
		err = xe_vm_lock(vm, true);
		if (err) {
			xe_vm_put(vm);
			return err;
		}
	}

	bo = xe_bo_create(xe, NULL, vm, args->size, ttm_bo_type_device,
			  bo_flags);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out_vm;
	}

	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
	if (err)
		goto out_bulk;

	args->handle = handle;
	goto out_put;

out_bulk:
	if (vm && !xe_vm_in_fault_mode(vm))
		__xe_bo_unset_bulk_move(bo);
out_put:
	xe_bo_put(bo);
out_vm:
	if (vm) {
		xe_vm_unlock(vm);
		xe_vm_put(vm);
	}
	return err;
}

int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_gem_mmap_offset *args = data;
	struct drm_gem_object *gem_obj;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags))
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file, args->handle);
	if (XE_IOCTL_DBG(xe, !gem_obj))
		return -ENOENT;

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	xe_bo_put(gem_to_xe_bo(gem_obj));
	return 0;
}

/**
 * xe_bo_lock() - Lock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be taken
 * @intr: Whether any wait for a contended lock should be interruptible
 *
 * Locks the buffer object's dma_resv object. If the buffer object is
 * pointing to a shared dma_resv object, that shared lock is locked.
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted. If @intr is set to false, the
 * function always returns 0.
 */
int xe_bo_lock(struct xe_bo *bo, bool intr)
{
	if (intr)
		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);

	dma_resv_lock(bo->ttm.base.resv, NULL);

	return 0;
}

/**
 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be released.
 *
 * Unlock a buffer object lock that was locked by xe_bo_lock().
 */
void xe_bo_unlock(struct xe_bo *bo)
{
	dma_resv_unlock(bo->ttm.base.resv);
}
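
/*
 * Typical xe_bo_lock()/xe_bo_unlock() pairing (illustrative sketch; the
 * work done while holding the object's dma_resv lock is up to the caller):
 *
 *	err = xe_bo_lock(bo, true);
 *	if (err)
 *		return err;
 *
 *	... operate on the bo under its dma_resv lock ...
 *
 *	xe_bo_unlock(bo);
 */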

/**
 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
 * @bo: The buffer object to migrate
 * @mem_type: The TTM memory type intended to migrate to
 *
 * Check whether the buffer object supports migration to the
 * given memory type. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate buffer objects and can be called without
 * the object lock held.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
{
	unsigned int cur_place;

	if (bo->ttm.type == ttm_bo_type_kernel)
		return true;

	if (bo->ttm.type == ttm_bo_type_sg)
		return false;

	for (cur_place = 0; cur_place < bo->placement.num_placement;
	     cur_place++) {
		if (bo->placements[cur_place].mem_type == mem_type)
			return true;
	}

	return false;
}

static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = mem_type;
}

/**
 * xe_bo_migrate - Migrate an object to the desired region id
 * @bo: The buffer object to migrate.
 * @mem_type: The TTM region type to migrate to.
 *
 * Attempt to migrate the buffer object to the desired memory region. The
 * buffer object may not be pinned, and must be locked.
 * On successful completion, the object memory type will be updated,
 * but an async migration task may not have completed yet. To wait for
 * the migration to finish, wait for the object's kernel fences to signal
 * while holding the object lock.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -EINTR or -ERESTARTSYS if signal pending.
 */
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement placement;
	struct ttm_place requested;

	xe_bo_assert_held(bo);

	if (bo->ttm.resource->mem_type == mem_type)
		return 0;

	if (xe_bo_is_pinned(bo))
		return -EBUSY;

	if (!xe_bo_can_migrate(bo, mem_type))
		return -EINVAL;

	xe_place_from_ttm_type(mem_type, &requested);
	placement.num_placement = 1;
	placement.num_busy_placement = 1;
	placement.placement = &requested;
	placement.busy_placement = &requested;

	/*
	 * Stolen needs to be handled like below VRAM handling if we ever need
	 * to support it.
	 */
	drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);

	if (mem_type_is_vram(mem_type)) {
		u32 c = 0;

		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
	}

	return ttm_bo_validate(&bo->ttm, &placement, &ctx);
}
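
/*
 * Example: migrating a bo to system (TT) memory and waiting for the async
 * copy to complete (illustrative sketch; callers that only need the
 * resource's memory type updated may skip the dma_resv wait):
 *
 *	err = xe_bo_lock(bo, true);
 *	if (err)
 *		return err;
 *
 *	err = xe_bo_migrate(bo, XE_PL_TT);
 *	if (!err)
 *		dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
 *				      true, MAX_SCHEDULE_TIMEOUT);
 *
 *	xe_bo_unlock(bo);
 */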

/**
 * xe_bo_evict - Evict an object to evict placement
 * @bo: The buffer object to migrate.
 * @force_alloc: Set force_alloc in ttm_operation_ctx
 *
 * On successful completion, the object memory will be moved to its evict
 * placement. This function blocks until the object has been fully moved.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = force_alloc,
	};
	struct ttm_placement placement;
	int ret;

	xe_evict_flags(&bo->ttm, &placement);
	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
	if (ret)
		return ret;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;
}

/**
 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
 * placed in system memory.
 * @bo: The xe_bo
 *
 * If a bo has an allowable placement in XE_PL_TT memory, it can't use
 * flat CCS compression, because the GPU then has no way to access the
 * CCS metadata using relevant commands. For the opposite case, we need to
 * allocate storage for the CCS metadata when the BO is not resident in
 * VRAM memory.
 *
 * Return: true if extra pages need to be allocated, false otherwise.
 */
bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
{
	return bo->ttm.type == ttm_bo_type_device &&
		!(bo->flags & XE_BO_CREATE_SYSTEM_BIT) &&
		(bo->flags & XE_BO_CREATE_VRAM_MASK);
}

/**
 * __xe_bo_release_dummy() - Dummy kref release function
 * @kref: The embedded struct kref.
 *
 * Dummy release function for xe_bo_put_deferred(). Keep off.
 */
void __xe_bo_release_dummy(struct kref *kref)
{
}

/**
 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
 *
 * Puts all bos whose put was deferred by xe_bo_put_deferred().
 * The @deferred list can be either an onstack local list or a global
 * shared list used by a workqueue.
 */
void xe_bo_put_commit(struct llist_head *deferred)
{
	struct llist_node *freed;
	struct xe_bo *bo, *next;

	if (!deferred)
		return;

	freed = llist_del_all(deferred);
	if (!freed)
		return;

	llist_for_each_entry_safe(bo, next, freed, freed)
		drm_gem_object_free(&bo->ttm.base.refcount);
}
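
/*
 * Example: deferring bo puts from a context where dropping the final
 * reference (and hence taking locks in the release path) is undesirable,
 * then committing them later (illustrative sketch; xe_bo_put_deferred()
 * is the helper declared in xe_bo.h):
 *
 *	LLIST_HEAD(deferred);
 *
 *	... for each bo whose reference should be dropped ...
 *		xe_bo_put_deferred(bo, &deferred);
 *
 *	xe_bo_put_commit(&deferred);
 */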

/**
 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
 * @file_priv: The DRM file the buffer handle is created for.
 * @dev: The DRM device.
 * @args: The dumb buffer creation arguments (struct drm_mode_create_dumb).
 *
 * See dumb_create() hook in include/drm/drm_drv.h
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	uint32_t handle;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	int err;
	u32 page_size = max_t(u32, PAGE_SIZE,
		xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);

	args->pitch = ALIGN(args->width * cpp, 64);
	args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
			   page_size);

	bo = xe_bo_create(xe, NULL, NULL, args->size, ttm_bo_type_device,
			  XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
			  XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT |
			  XE_BO_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&bo->ttm.base);
	if (!err)
		args->handle = handle;
	return err;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif