Commit 62742d12 authored by Lucas De Marchi

drm/xe: Normalize bo flags macros

The flags stored in the BO grew over time without following much of a
naming pattern. First, get rid of the _BIT suffix, which is banned
everywhere else per the guideline in drivers/gpu/drm/i915/i915_reg.h
that xe loosely follows:

	Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.

Here the flags aren't for a register, but it's good practice to keep
the naming consistent.

The second naming divergence is whether or not "CREATE" is part of the
name. Most of the flags are passed to the xe_bo_create*() family of
functions and change their behavior, but since the flags are also
stored in the bo itself and checked elsewhere in the code, it seems
better to simply omit the CREATE part.

With those two guidelines, all the flags are converted to the form
XE_BO_FLAG_<FLAG_NAME> by the following commands:

	git grep -le "XE_BO_" -- drivers/gpu/drm/xe | xargs sed -i \
		-e "s/XE_BO_\([_A-Z0-9]*\)_BIT/XE_BO_\1/g" \
		-e 's/XE_BO_CREATE_/XE_BO_FLAG_/g'
	git grep -le "XE_BO_" -- drivers/gpu/drm/xe | xargs sed -i -r \
		-e 's/XE_BO_(DEFER_BACKING|SCANOUT|FIXED_PLACEMENT|PAGETABLE|NEEDS_CPU_ACCESS|NEEDS_UC|INTERNAL_TEST|INTERNAL_64K|GGTT_INVALIDATE)/XE_BO_FLAG_\1/g'

And then the defines in drivers/gpu/drm/xe/xe_bo.h are adjusted to
follow the coding style.
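
As a quick sanity check, a grep for the old spellings (purely
illustrative, nothing in the conversion depends on it) should come back
empty once the commands above have been run:

	git grep -nE "XE_BO_CREATE_|XE_BO_[A-Z0-9_]*_BIT" -- drivers/gpu/drm/xe
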
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240322142702.186529-3-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
parent e27f8a45
@@ -17,7 +17,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
 {
 struct xe_bo *bo;
 int err;
-u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
 if (align)
 size = ALIGN(size, align);
......
@@ -11,7 +11,7 @@
 void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
 {
-if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+if (bo->flags & XE_BO_FLAG_PINNED) {
 /* Unpin our kernel fb first */
 xe_bo_lock(bo, false);
 xe_bo_unpin(bo);
@@ -33,9 +33,9 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
 if (ret)
 return ret;
-if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
 /*
-* XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+* XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
 * automatically set when creating FB. We cannot change caching
 * mode when the boect is VM_BINDed, so we can only set
 * coherency with display when unbound.
@@ -44,7 +44,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
 ttm_bo_unreserve(&bo->ttm);
 return -EINVAL;
 }
-bo->flags |= XE_BO_SCANOUT_BIT;
+bo->flags |= XE_BO_FLAG_SCANOUT;
 }
 ttm_bo_unreserve(&bo->ttm);
......
@@ -42,9 +42,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 if (!IS_DGFX(dev_priv)) {
 obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
 NULL, size,
-ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-XE_BO_CREATE_STOLEN_BIT |
-XE_BO_CREATE_PINNED_BIT);
+ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+XE_BO_FLAG_STOLEN |
+XE_BO_FLAG_PINNED);
 if (!IS_ERR(obj))
 drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
 else
@@ -52,9 +52,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 }
 if (IS_ERR(obj)) {
 obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
-ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
-XE_BO_CREATE_PINNED_BIT);
+ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
+XE_BO_FLAG_PINNED);
 }
 if (IS_ERR(obj)) {
@@ -81,8 +81,8 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
 {
 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
-if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
-if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
+if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
+if (obj->flags & XE_BO_FLAG_STOLEN)
 info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
 else
 info->fix.smem_start =
......
@@ -45,8 +45,8 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
 obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915),
 NULL, PAGE_ALIGN(size),
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
+XE_BO_FLAG_GGTT);
 if (IS_ERR(obj)) {
 kfree(vma);
 return false;
......
@@ -99,21 +99,21 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
 if (IS_DGFX(xe))
 dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM0_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_PAGETABLE);
+XE_BO_FLAG_VRAM0 |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_PAGETABLE);
 else
 dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
 ttm_bo_type_kernel,
-XE_BO_CREATE_STOLEN_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_PAGETABLE);
+XE_BO_FLAG_STOLEN |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_PAGETABLE);
 if (IS_ERR(dpt))
 dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
 ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_PAGETABLE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_PAGETABLE);
 if (IS_ERR(dpt))
 return PTR_ERR(dpt);
@@ -262,7 +262,7 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
 if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
 intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
-!(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
 struct xe_tile *tile = xe_device_get_root_tile(xe);
 /*
@@ -355,7 +355,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 struct i915_vma *vma;
 /* We reject creating !SCANOUT fb's, so this is weird.. */
-drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
 vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
 if (IS_ERR(vma))
......
@@ -73,8 +73,8 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
 xe_device_mem_access_get(xe);
 bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
 ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT);
 if (IS_ERR(bo)) {
 drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
......
@@ -62,7 +62,7 @@ initial_plane_bo(struct xe_device *xe,
 if (plane_config->size == 0)
 return NULL;
-flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
+flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
 base = round_down(plane_config->base, page_size);
 if (IS_DGFX(xe)) {
@@ -79,7 +79,7 @@ initial_plane_bo(struct xe_device *xe,
 }
 phys_base = pte & ~(page_size - 1);
-flags |= XE_BO_CREATE_VRAM0_BIT;
+flags |= XE_BO_FLAG_VRAM0;
 /*
 * We don't currently expect this to ever be placed in the
@@ -101,7 +101,7 @@ initial_plane_bo(struct xe_device *xe,
 if (!stolen)
 return NULL;
 phys_base = base;
-flags |= XE_BO_CREATE_STOLEN_BIT;
+flags |= XE_BO_FLAG_STOLEN;
 /*
 * If the FB is too big, just don't use it since fbdev is not very
......
@@ -116,7 +116,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
 int ret;
 /* TODO: Sanity check */
-unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
 if (IS_DGFX(xe))
 kunit_info(test, "Testing vram id %u\n", tile->id);
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
 static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
 {
 struct xe_bo *bo, *external;
-unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
 struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
 struct xe_gt *__gt;
 int err, i, id;
......
@@ -36,14 +36,14 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
 xe_bo_assert_held(imported);
 mem_type = XE_PL_VRAM0;
-if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
 /* No VRAM allowed */
 mem_type = XE_PL_TT;
 else if (params->force_different_devices && !p2p_enabled(params))
 /* No P2P */
 mem_type = XE_PL_TT;
 else if (params->force_different_devices && !is_dynamic(params) &&
-(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+(params->mem_mask & XE_BO_FLAG_SYSTEM))
 /* Pin migrated to TT */
 mem_type = XE_PL_TT;
@@ -93,7 +93,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
 * possible, saving a migration step as the transfer is just
 * likely as fast from system memory.
 */
-if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+if (params->mem_mask & XE_BO_FLAG_SYSTEM)
 KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
 else
 KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
@@ -115,11 +115,11 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 /* No VRAM on this device? */
 if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
-(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+(params->mem_mask & XE_BO_FLAG_VRAM0))
 return;
 size = PAGE_SIZE;
-if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
 size = SZ_64K;
@@ -148,7 +148,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 */
 if (params->force_different_devices &&
 !p2p_enabled(params) &&
-!(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+!(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
 KUNIT_FAIL(test,
 "xe_gem_prime_import() succeeded when it shouldn't have\n");
 } else {
@@ -161,7 +161,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 /* Pinning in VRAM is not allowed. */
 if (!is_dynamic(params) &&
 params->force_different_devices &&
-!(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+!(params->mem_mask & XE_BO_FLAG_SYSTEM))
 KUNIT_EXPECT_EQ(test, err, -EINVAL);
 /* Otherwise only expect interrupts or success. */
 else if (err && err != -EINTR && err != -ERESTARTSYS)
@@ -180,7 +180,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
 PTR_ERR(import));
 } else if (!params->force_different_devices ||
 p2p_enabled(params) ||
-(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
 /* Shouldn't fail if we can reuse same bo, use p2p or use system */
 KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
 PTR_ERR(import));
@@ -203,52 +203,52 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
 * gem object.
 */
 static const struct dma_buf_test_params test_params[] = {
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0,
 .attach_ops = &xe_dma_buf_attach_ops},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0,
 .attach_ops = &xe_dma_buf_attach_ops,
 .force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0,
 .attach_ops = &nop2p_attach_ops},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0,
 .attach_ops = &nop2p_attach_ops,
 .force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0},
+{.mem_mask = XE_BO_FLAG_VRAM0,
 .force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM,
 .attach_ops = &xe_dma_buf_attach_ops},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM,
 .attach_ops = &xe_dma_buf_attach_ops,
 .force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM,
 .attach_ops = &nop2p_attach_ops},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM,
 .attach_ops = &nop2p_attach_ops,
 .force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM},
+{.mem_mask = XE_BO_FLAG_SYSTEM,
 .force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 .attach_ops = &xe_dma_buf_attach_ops},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 .attach_ops = &xe_dma_buf_attach_ops,
 .force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 .attach_ops = &nop2p_attach_ops},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 .attach_ops = &nop2p_attach_ops,
 .force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
 .force_different_devices = true},
 {}
......
@@ -113,7 +113,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
 bo->size,
 ttm_bo_type_kernel,
 region |
-XE_BO_NEEDS_CPU_ACCESS);
+XE_BO_FLAG_NEEDS_CPU_ACCESS);
 if (IS_ERR(remote)) {
 KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %li\n",
 str, PTR_ERR(remote));
@@ -191,7 +191,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
 static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
 struct kunit *test)
 {
-test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
+test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
 }
 static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
@@ -203,9 +203,9 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
 return;
 if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
-region = XE_BO_CREATE_VRAM1_BIT;
+region = XE_BO_FLAG_VRAM1;
 else
-region = XE_BO_CREATE_VRAM0_BIT;
+region = XE_BO_FLAG_VRAM0;
 test_copy(m, bo, test, region);
 }
@@ -281,8 +281,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_PINNED);
 if (IS_ERR(big)) {
 KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
 goto vunmap;
@@ -290,8 +290,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_PINNED);
 if (IS_ERR(pt)) {
 KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
 PTR_ERR(pt));
@@ -301,8 +301,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
 2 * SZ_4K,
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_PINNED);
 if (IS_ERR(tiny)) {
 KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
 PTR_ERR(pt));
......
@@ -111,7 +111,7 @@ bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
 static bool xe_bo_is_user(struct xe_bo *bo)
 {
-return bo->flags & XE_BO_CREATE_USER_BIT;
+return bo->flags & XE_BO_FLAG_USER;
 }
 static struct xe_migrate *
@@ -137,7 +137,7 @@ static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
 u32 bo_flags, u32 *c)
 {
-if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
+if (bo_flags & XE_BO_FLAG_SYSTEM) {
 xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
 bo->placements[*c] = (struct ttm_place) {
@@ -164,12 +164,12 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 * For eviction / restore on suspend / resume objects
 * pinned in VRAM must be contiguous
 */
-if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
-XE_BO_CREATE_GGTT_BIT))
+if (bo_flags & (XE_BO_FLAG_PINNED |
+XE_BO_FLAG_GGTT))
 place.flags |= TTM_PL_FLAG_CONTIGUOUS;
 if (io_size < vram->usable_size) {
-if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
+if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
 place.fpfn = 0;
 place.lpfn = io_size >> PAGE_SHIFT;
 } else {
@@ -183,22 +183,22 @@ static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
 u32 bo_flags, u32 *c)
 {
-if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
+if (bo_flags & XE_BO_FLAG_VRAM0)
 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
-if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
+if (bo_flags & XE_BO_FLAG_VRAM1)
 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
 }
 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
 u32 bo_flags, u32 *c)
 {
-if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
+if (bo_flags & XE_BO_FLAG_STOLEN) {
 xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
 bo->placements[*c] = (struct ttm_place) {
 .mem_type = XE_PL_STOLEN,
-.flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
-XE_BO_CREATE_GGTT_BIT) ?
+.flags = bo_flags & (XE_BO_FLAG_PINNED |
+XE_BO_FLAG_GGTT) ?
 TTM_PL_FLAG_CONTIGUOUS : 0,
 };
 *c += 1;
@@ -339,7 +339,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 break;
 }
-WARN_ON((bo->flags & XE_BO_CREATE_USER_BIT) && !bo->cpu_caching);
+WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
 /*
 * Display scanout is always non-coherent with the CPU cache.
@@ -347,8 +347,8 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 * For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
 * require a CPU:WC mapping.
 */
-if ((!bo->cpu_caching && bo->flags & XE_BO_SCANOUT_BIT) ||
-(xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
+if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
+(xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
 caching = ttm_write_combined;
 err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
@@ -1102,7 +1102,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
 struct drm_device *ddev = tbo->base.dev;
 struct xe_device *xe = to_xe_device(ddev);
 struct xe_bo *bo = ttm_to_xe_bo(tbo);
-bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
+bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
 vm_fault_t ret;
 int idx;
@@ -1215,19 +1215,19 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 return ERR_PTR(-EINVAL);
 }
-if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
-!(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
+if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
+!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
 (flags & XE_BO_NEEDS_64K))) {
 aligned_size = ALIGN(size, SZ_64K);
 if (type != ttm_bo_type_device)
 size = ALIGN(size, SZ_64K);
-flags |= XE_BO_INTERNAL_64K;
+flags |= XE_BO_FLAG_INTERNAL_64K;
 alignment = SZ_64K >> PAGE_SHIFT;
 } else {
 aligned_size = ALIGN(size, SZ_4K);
-flags &= ~XE_BO_INTERNAL_64K;
+flags &= ~XE_BO_FLAG_INTERNAL_64K;
 alignment = SZ_4K >> PAGE_SHIFT;
 }
@@ -1256,11 +1256,11 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
 if (resv) {
-ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
+ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
 ctx.resv = resv;
 }
-if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
+if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
 err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
 if (WARN_ON(err)) {
 xe_ttm_bo_destroy(&bo->ttm);
@@ -1270,7 +1270,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
 /* Defer populating type_sg bos */
 placement = (type == ttm_bo_type_sg ||
-bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
+bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
 &bo->placement;
 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
 placement, alignment,
@@ -1325,21 +1325,21 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
 {
 struct ttm_place *place = bo->placements;
-if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
+if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
 return -EINVAL;
 place->flags = TTM_PL_FLAG_CONTIGUOUS;
 place->fpfn = start >> PAGE_SHIFT;
 place->lpfn = end >> PAGE_SHIFT;
-switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
-case XE_BO_CREATE_VRAM0_BIT:
+switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
+case XE_BO_FLAG_VRAM0:
 place->mem_type = XE_PL_VRAM0;
 break;
-case XE_BO_CREATE_VRAM1_BIT:
+case XE_BO_FLAG_VRAM1:
 place->mem_type = XE_PL_VRAM1;
 break;
-case XE_BO_CREATE_STOLEN_BIT:
+case XE_BO_FLAG_STOLEN:
 place->mem_type = XE_PL_STOLEN;
 break;
@@ -1373,7 +1373,7 @@ __xe_bo_create_locked(struct xe_device *xe,
 if (IS_ERR(bo))
 return bo;
-flags |= XE_BO_FIXED_PLACEMENT_BIT;
+flags |= XE_BO_FLAG_FIXED_PLACEMENT;
 err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
 if (err) {
 xe_bo_free(bo);
@@ -1383,7 +1383,7 @@ __xe_bo_create_locked(struct xe_device *xe,
 bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
 vm && !xe_vm_in_fault_mode(vm) &&
-flags & XE_BO_CREATE_USER_BIT ?
+flags & XE_BO_FLAG_USER ?
 &vm->lru_bulk_move : NULL, size,
 cpu_caching, type, flags);
 if (IS_ERR(bo))
@@ -1400,13 +1400,13 @@ __xe_bo_create_locked(struct xe_device *xe,
 xe_vm_get(vm);
 bo->vm = vm;
-if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
-if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
+if (bo->flags & XE_BO_FLAG_GGTT) {
+if (!tile && flags & XE_BO_FLAG_STOLEN)
 tile = xe_device_get_root_tile(xe);
 xe_assert(xe, tile);
-if (flags & XE_BO_FIXED_PLACEMENT_BIT) {
+if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
 err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
 start + bo->size, U64_MAX);
 } else {
@@ -1449,7 +1449,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
 {
 struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
 cpu_caching, type,
-flags | XE_BO_CREATE_USER_BIT);
+flags | XE_BO_FLAG_USER);
 if (!IS_ERR(bo))
 xe_bo_unlock_vm_held(bo);
@@ -1478,12 +1478,12 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
 u64 start = offset == ~0ull ? 0 : offset;
 u64 end = offset == ~0ull ? offset : start + size;
-if (flags & XE_BO_CREATE_STOLEN_BIT &&
+if (flags & XE_BO_FLAG_STOLEN &&
 xe_ttm_stolen_cpu_access_needs_ggtt(xe))
-flags |= XE_BO_CREATE_GGTT_BIT;
+flags |= XE_BO_FLAG_GGTT;
 bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
-flags | XE_BO_NEEDS_CPU_ACCESS);
+flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
 if (IS_ERR(bo))
 return bo;
@@ -1580,9 +1580,9 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
 {
 struct xe_bo *bo;
-u32 dst_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile) | XE_BO_CREATE_GGTT_BIT;
-dst_flags |= (*src)->flags & XE_BO_GGTT_INVALIDATE;
+u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
+dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
 xe_assert(xe, IS_DGFX(xe));
 xe_assert(xe, !(*src)->vmap.is_iomem);
@@ -1663,8 +1663,8 @@ int xe_bo_pin(struct xe_bo *bo)
 xe_assert(xe, !xe_bo_is_user(bo));
 /* Pinned object must be in GGTT or have pinned flag */
-xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
-XE_BO_CREATE_GGTT_BIT));
+xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
+XE_BO_FLAG_GGTT));
 /*
 * No reason we can't support pinning imported dma-bufs we just don't
@@ -1685,7 +1685,7 @@ int xe_bo_pin(struct xe_bo *bo)
 * during suspend / resume (force restore to same physical address).
 */
 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-bo->flags & XE_BO_INTERNAL_TEST)) {
+bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
 struct ttm_place *place = &(bo->placements[0]);
 if (mem_type_is_vram(place->mem_type)) {
@@ -1753,7 +1753,7 @@ void xe_bo_unpin(struct xe_bo *bo)
 xe_assert(xe, xe_bo_is_pinned(bo));
 if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-bo->flags & XE_BO_INTERNAL_TEST)) {
+bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
 struct ttm_place *place = &(bo->placements[0]);
 if (mem_type_is_vram(place->mem_type)) {
@@ -1856,7 +1856,7 @@ int xe_bo_vmap(struct xe_bo *bo)
 xe_bo_assert_held(bo);
-if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
+if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
 return -EINVAL;
 if (!iosys_map_is_null(&bo->vmap))
@@ -1938,29 +1938,29 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
 bo_flags = 0;
 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
-bo_flags |= XE_BO_DEFER_BACKING;
+bo_flags |= XE_BO_FLAG_DEFER_BACKING;
 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
-bo_flags |= XE_BO_SCANOUT_BIT;
-bo_flags |= args->placement << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
+bo_flags |= XE_BO_FLAG_SCANOUT;
+bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
 if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
-if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
+if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
 return -EINVAL;
-bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
+bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
 }
 if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
 return -EINVAL;
-if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_CREATE_VRAM_MASK &&
+if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
 return -EINVAL;
-if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_SCANOUT_BIT &&
+if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
 return -EINVAL;
@@ -2209,7 +2209,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
 * can't be used since there's no CCS storage associated with
 * non-VRAM addresses.
 */
-if (IS_DGFX(xe) && (bo->flags & XE_BO_CREATE_SYSTEM_BIT))
+if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
 return false;
 return true;
@@ -2278,9 +2278,9 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
 bo = xe_bo_create_user(xe, NULL, NULL, args->size,
 DRM_XE_GEM_CPU_CACHING_WC,
 ttm_bo_type_device,
-XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
-XE_BO_SCANOUT_BIT |
-XE_BO_NEEDS_CPU_ACCESS);
+XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+XE_BO_FLAG_SCANOUT |
+XE_BO_FLAG_NEEDS_CPU_ACCESS);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
......
@@ -23,33 +23,32 @@
 #define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
-#define XE_BO_CREATE_USER_BIT BIT(0)
+#define XE_BO_FLAG_USER BIT(0)
 /* The bits below need to be contiguous, or things break */
-#define XE_BO_CREATE_SYSTEM_BIT BIT(1)
-#define XE_BO_CREATE_VRAM0_BIT BIT(2)
-#define XE_BO_CREATE_VRAM1_BIT BIT(3)
-#define XE_BO_CREATE_VRAM_MASK (XE_BO_CREATE_VRAM0_BIT | \
-XE_BO_CREATE_VRAM1_BIT)
+#define XE_BO_FLAG_SYSTEM BIT(1)
+#define XE_BO_FLAG_VRAM0 BIT(2)
+#define XE_BO_FLAG_VRAM1 BIT(3)
+#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
 /* -- */
-#define XE_BO_CREATE_STOLEN_BIT BIT(4)
-#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
-(IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
-XE_BO_CREATE_SYSTEM_BIT)
-#define XE_BO_CREATE_GGTT_BIT BIT(5)
-#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
-#define XE_BO_CREATE_PINNED_BIT BIT(7)
-#define XE_BO_CREATE_NO_RESV_EVICT BIT(8)
-#define XE_BO_DEFER_BACKING BIT(9)
-#define XE_BO_SCANOUT_BIT BIT(10)
-#define XE_BO_FIXED_PLACEMENT_BIT BIT(11)
-#define XE_BO_PAGETABLE BIT(12)
-#define XE_BO_NEEDS_CPU_ACCESS BIT(13)
-#define XE_BO_NEEDS_UC BIT(14)
+#define XE_BO_FLAG_STOLEN BIT(4)
+#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
+XE_BO_FLAG_VRAM0 << (tile)->id : \
+XE_BO_FLAG_SYSTEM)
+#define XE_BO_FLAG_GGTT BIT(5)
+#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
+#define XE_BO_FLAG_PINNED BIT(7)
+#define XE_BO_FLAG_NO_RESV_EVICT BIT(8)
+#define XE_BO_FLAG_DEFER_BACKING BIT(9)
+#define XE_BO_FLAG_SCANOUT BIT(10)
+#define XE_BO_FLAG_FIXED_PLACEMENT BIT(11)
+#define XE_BO_FLAG_PAGETABLE BIT(12)
+#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
+#define XE_BO_FLAG_NEEDS_UC BIT(14)
 #define XE_BO_NEEDS_64K BIT(15)
-#define XE_BO_GGTT_INVALIDATE BIT(16)
+#define XE_BO_FLAG_GGTT_INVALIDATE BIT(16)
 /* this one is trigger internally only */
-#define XE_BO_INTERNAL_TEST BIT(30)
-#define XE_BO_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
+#define XE_BO_FLAG_INTERNAL_64K BIT(31)
 #define XELPG_PPGTT_PTE_PAT3 BIT_ULL(62)
 #define XE2_PPGTT_PTE_PAT4 BIT_ULL(61)
......
@@ -146,7 +146,7 @@ int xe_bo_restore_kernel(struct xe_device *xe)
 return ret;
 }
-if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
+if (bo->flags & XE_BO_FLAG_GGTT) {
 struct xe_tile *tile = bo->tile;
 mutex_lock(&tile->mem.ggtt->lock);
......
@@ -217,7 +217,7 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
 dma_resv_lock(resv, NULL);
 bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
 0, /* Will require 1way or 2way for vm_bind */
-ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
+ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
 if (IS_ERR(bo)) {
 ret = PTR_ERR(bo);
 goto error;
......
@@ -224,11 +224,11 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
 * scratch entires, rather keep the scratch page in system memory on
 * platforms where 64K pages are needed for VRAM.
 */
-flags = XE_BO_CREATE_PINNED_BIT;
+flags = XE_BO_FLAG_PINNED;
 if (ggtt->flags & XE_GGTT_FLAGS_64K)
-flags |= XE_BO_CREATE_SYSTEM_BIT;
+flags |= XE_BO_FLAG_SYSTEM;
 else
-flags |= XE_BO_CREATE_VRAM_IF_DGFX(ggtt->tile);
+flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);
 ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
 if (IS_ERR(ggtt->scratch)) {
@@ -375,7 +375,7 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
 void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 {
-u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
 u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
 u64 start = bo->ggtt_node.start;
 u64 offset, pte;
@@ -413,7 +413,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
 xe_ggtt_map_bo(ggtt, bo);
 mutex_unlock(&ggtt->lock);
-if (!err && bo->flags & XE_BO_GGTT_INVALIDATE)
+if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
 xe_ggtt_invalidate(ggtt);
 xe_device_mem_access_put(tile_to_xe(ggtt->tile));
@@ -457,7 +457,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
 xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
-bo->flags & XE_BO_GGTT_INVALIDATE);
+bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
 }
 int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)
......
@@ -130,8 +130,8 @@ static int query_compatibility_version(struct xe_gsc *gsc)
 bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
 ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT);
 if (IS_ERR(bo)) {
 xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
 return PTR_ERR(bo);
@@ -468,8 +468,8 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
 bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
 ttm_bo_type_kernel,
-XE_BO_CREATE_STOLEN_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_STOLEN |
+XE_BO_FLAG_GGTT);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
......
@@ -411,8 +411,8 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
 bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
 ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT);
 if (IS_ERR(bo)) {
 kfree(csme);
 return PTR_ERR(bo);
......
@@ -273,9 +273,9 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
 ads->regset_size = calculate_regset_size(gt);
 bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
......
@@ -159,9 +159,9 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
 primelockdep(ct);
 bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
......
@@ -78,9 +78,9 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
 return -EINVAL;
 bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
 guc->hwconfig.bo = bo;
......
@@ -84,9 +84,9 @@ int xe_guc_log_init(struct xe_guc_log *log)
 struct xe_bo *bo;
 bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
......
@@ -929,9 +929,9 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
 return err;
 bo = xe_managed_bo_create_pin_map(xe, tile, size,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
......
@@ -59,8 +59,8 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
 bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
 PXP43_HUC_AUTH_INOUT_SIZE * 2,
 ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
......
@@ -518,9 +518,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
 xe_reg_sr_apply_whitelist(hwe);
 hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 if (IS_ERR(hwe->hwsp)) {
 err = PTR_ERR(hwe->hwsp);
 goto err_name;
......
@@ -70,8 +70,8 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
 PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
 lmtt->ops->lmtt_pte_num(level)),
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
-XE_BO_NEEDS_64K | XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
 if (IS_ERR(bo)) {
 err = PTR_ERR(bo);
 goto out_free_pt;
......
@@ -743,9 +743,9 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
 lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
 ring_size + xe_lrc_size(xe, hwe->class),
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 if (IS_ERR(lrc->bo))
 return PTR_ERR(lrc->bo);
......
@@ -127,11 +127,11 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
 /* XXX: convert to managed bo */
 bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
 ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE |
-XE_BO_NEEDS_UC |
-XE_BO_NEEDS_CPU_ACCESS);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE |
+XE_BO_FLAG_NEEDS_UC |
+XE_BO_FLAG_NEEDS_CPU_ACCESS);
 if (IS_ERR(bo)) {
 err = PTR_ERR(bo);
 goto out;
......
@@ -155,8 +155,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 bo = xe_bo_create_pin_map(vm->xe, tile, vm,
 num_entries * XE_PAGE_SIZE,
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_PINNED);
 if (IS_ERR(bo))
 return PTR_ERR(bo);
......
@@ -108,11 +108,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
 pt->level = level;
 bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
 ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
-XE_BO_CREATE_PINNED_BIT |
-XE_BO_CREATE_NO_RESV_EVICT |
-XE_BO_PAGETABLE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+XE_BO_FLAG_PINNED |
+XE_BO_FLAG_NO_RESV_EVICT |
+XE_BO_FLAG_PAGETABLE);
 if (IS_ERR(bo)) {
 err = PTR_ERR(bo);
 goto err_kfree;
......
@@ -48,9 +48,9 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 sa_manager->bo = NULL;
 bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 if (IS_ERR(bo)) {
 drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
 PTR_ERR(bo));
......
@@ -303,7 +303,7 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe,
 XE_WARN_ON(IS_DGFX(xe));
 /* XXX: Require BO to be mapped to GGTT? */
-if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
+if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_GGTT)))
 return -EIO;
 /* GGTT is always contiguously mapped */
......
@@ -763,8 +763,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
 return 0;
 err = uc_fw_copy(uc_fw, fw->data, fw->size,
-XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
 uc_fw_release(fw);
......
@@ -3069,7 +3069,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 goto put_obj;
 }
-if (bos[i]->flags & XE_BO_INTERNAL_64K) {
+if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
 if (XE_IOCTL_DBG(xe, obj_offset &
 XE_64K_PAGE_MASK) ||
 XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
......