Commit c73acc1e authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe: Use Xe assert macros instead of XE_WARN_ON macro

The XE_WARN_ON macro maps to WARN_ON, which is not justified
in many cases where only a simple debug check is needed.
Replace the use of the XE_WARN_ON macro with the new xe_assert
macros, which rely on drm_*. These take a struct drm_device
argument, which is one of the main changes in this commit. The
other main change is that the condition is reversed: with
XE_WARN_ON a message is displayed if the condition is true,
whereas with xe_assert it is displayed if the condition is false.
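
For example (all of these are taken verbatim from the hunks below, where
xe, gt and ggtt->tile are pointers already available in the functions
being touched), a check such as:

	XE_WARN_ON(!tile->mem.vram.usable_size);

becomes:

	xe_assert(xe, tile->mem.vram.usable_size);

and the GT- and tile-scoped variants are used where that is the more
natural scope:

	xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
	xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));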

v2:
- Rebase
- Keep WARN splats in xe_wopcm.c (Matt Roper)

v3:
- Rebase
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 1975b591
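
As a rough illustration of the idea behind the new macros (this is only a
sketch, not the actual contents of xe_assert.h, which also provides
xe_gt_assert()/xe_tile_assert() and message-formatting variants): a
device-scoped, debug-only check built on drm_WARN(), with the polarity
inverted relative to XE_WARN_ON:

	#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
	/* Sketch only: fires when the condition is FALSE; the drm_device
	 * argument lets the warning identify which GPU the assert belongs to.
	 */
	#define xe_assert(xe, condition) \
		drm_WARN(&(xe)->drm, !(condition), "assertion failed: %s", #condition)
	#else
	#define xe_assert(xe, condition) ((void)0)
	#endif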
...@@ -66,7 +66,7 @@ __xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr) ...@@ -66,7 +66,7 @@ __xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
bb->cs[bb->len++] = MI_BATCH_BUFFER_END; bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
WARN_ON(bb->len * 4 + bb_prefetch(q->gt) > size); xe_gt_assert(q->gt, bb->len * 4 + bb_prefetch(q->gt) <= size);
xe_sa_bo_flush_write(bb->bo); xe_sa_bo_flush_write(bb->bo);
...@@ -84,8 +84,8 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q, ...@@ -84,8 +84,8 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
4 * second_idx, 4 * second_idx,
}; };
XE_WARN_ON(second_idx > bb->len); xe_gt_assert(q->gt, second_idx <= bb->len);
XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION)); xe_gt_assert(q->gt, q->vm->flags & XE_VM_FLAG_MIGRATION);
return __xe_bb_create_job(q, bb, addr); return __xe_bb_create_job(q, bb, addr);
} }
...@@ -95,7 +95,7 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q, ...@@ -95,7 +95,7 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
{ {
u64 addr = xe_sa_bo_gpu_addr(bb->bo); u64 addr = xe_sa_bo_gpu_addr(bb->bo);
XE_WARN_ON(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION); xe_gt_assert(q->gt, !(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION));
return __xe_bb_create_job(q, bb, &addr); return __xe_bb_create_job(q, bb, &addr);
} }
......
...@@ -103,7 +103,7 @@ static bool xe_bo_is_user(struct xe_bo *bo) ...@@ -103,7 +103,7 @@ static bool xe_bo_is_user(struct xe_bo *bo)
static struct xe_tile * static struct xe_tile *
mem_type_to_tile(struct xe_device *xe, u32 mem_type) mem_type_to_tile(struct xe_device *xe, u32 mem_type)
{ {
XE_WARN_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type)); xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
} }
...@@ -142,7 +142,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo, ...@@ -142,7 +142,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place place = { .mem_type = mem_type }; struct ttm_place place = { .mem_type = mem_type };
u64 io_size = tile->mem.vram.io_size; u64 io_size = tile->mem.vram.io_size;
XE_WARN_ON(!tile->mem.vram.usable_size); xe_assert(xe, tile->mem.vram.usable_size);
/* /*
* For eviction / restore on suspend / resume objects * For eviction / restore on suspend / resume objects
...@@ -544,10 +544,11 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo, ...@@ -544,10 +544,11 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
struct dma_buf_attachment *attach = ttm_bo->base.import_attach; struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
ttm); ttm);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
struct sg_table *sg; struct sg_table *sg;
XE_WARN_ON(!attach); xe_assert(xe, attach);
XE_WARN_ON(!ttm_bo->ttm); xe_assert(xe, ttm_bo->ttm);
if (new_res->mem_type == XE_PL_SYSTEM) if (new_res->mem_type == XE_PL_SYSTEM)
goto out; goto out;
...@@ -709,8 +710,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, ...@@ -709,8 +710,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
else if (mem_type_is_vram(old_mem_type)) else if (mem_type_is_vram(old_mem_type))
tile = mem_type_to_tile(xe, old_mem_type); tile = mem_type_to_tile(xe, old_mem_type);
XE_WARN_ON(!tile); xe_assert(xe, tile);
XE_WARN_ON(!tile->migrate); xe_tile_assert(tile, tile->migrate);
trace_xe_bo_move(bo); trace_xe_bo_move(bo);
xe_device_mem_access_get(xe); xe_device_mem_access_get(xe);
...@@ -740,7 +741,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, ...@@ -740,7 +741,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out; goto out;
} }
XE_WARN_ON(new_mem->start != xe_assert(xe, new_mem->start ==
bo->placements->fpfn); bo->placements->fpfn);
iosys_map_set_vaddr_iomem(&bo->vmap, new_addr); iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
...@@ -939,9 +940,10 @@ static void __xe_bo_vunmap(struct xe_bo *bo); ...@@ -939,9 +940,10 @@ static void __xe_bo_vunmap(struct xe_bo *bo);
*/ */
static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo) static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
{ {
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
bool locked; bool locked;
XE_WARN_ON(kref_read(&ttm_bo->kref)); xe_assert(xe, !kref_read(&ttm_bo->kref));
/* /*
* We can typically only race with TTM trylocking under the * We can typically only race with TTM trylocking under the
...@@ -952,7 +954,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo) ...@@ -952,7 +954,7 @@ static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
spin_lock(&ttm_bo->bdev->lru_lock); spin_lock(&ttm_bo->bdev->lru_lock);
locked = dma_resv_trylock(ttm_bo->base.resv); locked = dma_resv_trylock(ttm_bo->base.resv);
spin_unlock(&ttm_bo->bdev->lru_lock); spin_unlock(&ttm_bo->bdev->lru_lock);
XE_WARN_ON(!locked); xe_assert(xe, locked);
return locked; return locked;
} }
...@@ -968,7 +970,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo) ...@@ -968,7 +970,7 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
return; return;
bo = ttm_to_xe_bo(ttm_bo); bo = ttm_to_xe_bo(ttm_bo);
XE_WARN_ON(bo->created && kref_read(&ttm_bo->base.refcount)); xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
/* /*
* Corner case where TTM fails to allocate memory and this BOs resv * Corner case where TTM fails to allocate memory and this BOs resv
...@@ -1041,12 +1043,13 @@ struct ttm_device_funcs xe_ttm_funcs = { ...@@ -1041,12 +1043,13 @@ struct ttm_device_funcs xe_ttm_funcs = {
static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{ {
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
if (bo->ttm.base.import_attach) if (bo->ttm.base.import_attach)
drm_prime_gem_destroy(&bo->ttm.base, NULL); drm_prime_gem_destroy(&bo->ttm.base, NULL);
drm_gem_object_release(&bo->ttm.base); drm_gem_object_release(&bo->ttm.base);
WARN_ON(!list_empty(&bo->vmas)); xe_assert(xe, list_empty(&bo->vmas));
if (bo->ggtt_node.size) if (bo->ggtt_node.size)
xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo); xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
...@@ -1082,7 +1085,7 @@ static void xe_gem_object_close(struct drm_gem_object *obj, ...@@ -1082,7 +1085,7 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
struct xe_bo *bo = gem_to_xe_bo(obj); struct xe_bo *bo = gem_to_xe_bo(obj);
if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) { if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
XE_WARN_ON(!xe_bo_is_user(bo)); xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));
xe_bo_lock(bo, false); xe_bo_lock(bo, false);
ttm_bo_set_bulk_move(&bo->ttm, NULL); ttm_bo_set_bulk_move(&bo->ttm, NULL);
...@@ -1198,7 +1201,7 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, ...@@ -1198,7 +1201,7 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
int err; int err;
/* Only kernel objects should set GT */ /* Only kernel objects should set GT */
XE_WARN_ON(tile && type != ttm_bo_type_kernel); xe_assert(xe, !tile || type == ttm_bo_type_kernel);
if (XE_WARN_ON(!size)) { if (XE_WARN_ON(!size)) {
xe_bo_free(bo); xe_bo_free(bo);
...@@ -1354,7 +1357,7 @@ xe_bo_create_locked_range(struct xe_device *xe, ...@@ -1354,7 +1357,7 @@ xe_bo_create_locked_range(struct xe_device *xe,
if (!tile && flags & XE_BO_CREATE_STOLEN_BIT) if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
tile = xe_device_get_root_tile(xe); tile = xe_device_get_root_tile(xe);
XE_WARN_ON(!tile); xe_assert(xe, tile);
if (flags & XE_BO_CREATE_STOLEN_BIT && if (flags & XE_BO_CREATE_STOLEN_BIT &&
flags & XE_BO_FIXED_PLACEMENT_BIT) { flags & XE_BO_FIXED_PLACEMENT_BIT) {
...@@ -1485,8 +1488,8 @@ int xe_bo_pin_external(struct xe_bo *bo) ...@@ -1485,8 +1488,8 @@ int xe_bo_pin_external(struct xe_bo *bo)
struct xe_device *xe = xe_bo_device(bo); struct xe_device *xe = xe_bo_device(bo);
int err; int err;
XE_WARN_ON(bo->vm); xe_assert(xe, !bo->vm);
XE_WARN_ON(!xe_bo_is_user(bo)); xe_assert(xe, xe_bo_is_user(bo));
if (!xe_bo_is_pinned(bo)) { if (!xe_bo_is_pinned(bo)) {
err = xe_bo_validate(bo, NULL, false); err = xe_bo_validate(bo, NULL, false);
...@@ -1518,20 +1521,20 @@ int xe_bo_pin(struct xe_bo *bo) ...@@ -1518,20 +1521,20 @@ int xe_bo_pin(struct xe_bo *bo)
int err; int err;
/* We currently don't expect user BO to be pinned */ /* We currently don't expect user BO to be pinned */
XE_WARN_ON(xe_bo_is_user(bo)); xe_assert(xe, !xe_bo_is_user(bo));
/* Pinned object must be in GGTT or have pinned flag */ /* Pinned object must be in GGTT or have pinned flag */
XE_WARN_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT | xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
XE_BO_CREATE_GGTT_BIT))); XE_BO_CREATE_GGTT_BIT));
/* /*
* No reason we can't support pinning imported dma-bufs we just don't * No reason we can't support pinning imported dma-bufs we just don't
* expect to pin an imported dma-buf. * expect to pin an imported dma-buf.
*/ */
XE_WARN_ON(bo->ttm.base.import_attach); xe_assert(xe, !bo->ttm.base.import_attach);
/* We only expect at most 1 pin */ /* We only expect at most 1 pin */
XE_WARN_ON(xe_bo_is_pinned(bo)); xe_assert(xe, !xe_bo_is_pinned(bo));
err = xe_bo_validate(bo, NULL, false); err = xe_bo_validate(bo, NULL, false);
if (err) if (err)
...@@ -1547,7 +1550,7 @@ int xe_bo_pin(struct xe_bo *bo) ...@@ -1547,7 +1550,7 @@ int xe_bo_pin(struct xe_bo *bo)
struct ttm_place *place = &(bo->placements[0]); struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) { if (mem_type_is_vram(place->mem_type)) {
XE_WARN_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS)); xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT; vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
...@@ -1584,9 +1587,9 @@ void xe_bo_unpin_external(struct xe_bo *bo) ...@@ -1584,9 +1587,9 @@ void xe_bo_unpin_external(struct xe_bo *bo)
{ {
struct xe_device *xe = xe_bo_device(bo); struct xe_device *xe = xe_bo_device(bo);
XE_WARN_ON(bo->vm); xe_assert(xe, !bo->vm);
XE_WARN_ON(!xe_bo_is_pinned(bo)); xe_assert(xe, xe_bo_is_pinned(bo));
XE_WARN_ON(!xe_bo_is_user(bo)); xe_assert(xe, xe_bo_is_user(bo));
if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) { if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
spin_lock(&xe->pinned.lock); spin_lock(&xe->pinned.lock);
...@@ -1607,15 +1610,15 @@ void xe_bo_unpin(struct xe_bo *bo) ...@@ -1607,15 +1610,15 @@ void xe_bo_unpin(struct xe_bo *bo)
{ {
struct xe_device *xe = xe_bo_device(bo); struct xe_device *xe = xe_bo_device(bo);
XE_WARN_ON(bo->ttm.base.import_attach); xe_assert(xe, !bo->ttm.base.import_attach);
XE_WARN_ON(!xe_bo_is_pinned(bo)); xe_assert(xe, xe_bo_is_pinned(bo));
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) && if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_INTERNAL_TEST)) { bo->flags & XE_BO_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]); struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) { if (mem_type_is_vram(place->mem_type)) {
XE_WARN_ON(list_empty(&bo->pinned_link)); xe_assert(xe, !list_empty(&bo->pinned_link));
spin_lock(&xe->pinned.lock); spin_lock(&xe->pinned.lock);
list_del_init(&bo->pinned_link); list_del_init(&bo->pinned_link);
...@@ -1676,15 +1679,16 @@ bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo) ...@@ -1676,15 +1679,16 @@ bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
*/ */
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{ {
struct xe_device *xe = xe_bo_device(bo);
struct xe_res_cursor cur; struct xe_res_cursor cur;
u64 page; u64 page;
XE_WARN_ON(page_size > PAGE_SIZE); xe_assert(xe, page_size <= PAGE_SIZE);
page = offset >> PAGE_SHIFT; page = offset >> PAGE_SHIFT;
offset &= (PAGE_SIZE - 1); offset &= (PAGE_SIZE - 1);
if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) { if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
XE_WARN_ON(!bo->ttm.ttm); xe_assert(xe, bo->ttm.ttm);
xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT, xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
page_size, &cur); page_size, &cur);
......
...@@ -158,8 +158,8 @@ int xe_bo_restore_kernel(struct xe_device *xe) ...@@ -158,8 +158,8 @@ int xe_bo_restore_kernel(struct xe_device *xe)
* We expect validate to trigger a move VRAM and our move code * We expect validate to trigger a move VRAM and our move code
* should setup the iosys map. * should setup the iosys map.
*/ */
XE_WARN_ON(iosys_map_is_null(&bo->vmap)); xe_assert(xe, !iosys_map_is_null(&bo->vmap));
XE_WARN_ON(!xe_bo_is_vram(bo)); xe_assert(xe, xe_bo_is_vram(bo));
xe_bo_put(bo); xe_bo_put(bo);
......
...@@ -394,7 +394,7 @@ bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe) ...@@ -394,7 +394,7 @@ bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
if (active) { if (active) {
int ref = atomic_inc_return(&xe->mem_access.ref); int ref = atomic_inc_return(&xe->mem_access.ref);
XE_WARN_ON(ref == S32_MAX); xe_assert(xe, ref != S32_MAX);
} }
return active; return active;
...@@ -436,7 +436,7 @@ void xe_device_mem_access_get(struct xe_device *xe) ...@@ -436,7 +436,7 @@ void xe_device_mem_access_get(struct xe_device *xe)
xe_pm_runtime_get(xe); xe_pm_runtime_get(xe);
ref = atomic_inc_return(&xe->mem_access.ref); ref = atomic_inc_return(&xe->mem_access.ref);
XE_WARN_ON(ref == S32_MAX); xe_assert(xe, ref != S32_MAX);
} }
...@@ -450,5 +450,5 @@ void xe_device_mem_access_put(struct xe_device *xe) ...@@ -450,5 +450,5 @@ void xe_device_mem_access_put(struct xe_device *xe)
ref = atomic_dec_return(&xe->mem_access.ref); ref = atomic_dec_return(&xe->mem_access.ref);
xe_pm_runtime_put(xe); xe_pm_runtime_put(xe);
XE_WARN_ON(ref < 0); xe_assert(xe, ref >= 0);
} }
...@@ -117,7 +117,7 @@ static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm) ...@@ -117,7 +117,7 @@ static int xe_exec_begin(struct drm_exec *exec, struct xe_vm *vm)
* to a location where the GPU can access it). * to a location where the GPU can access it).
*/ */
list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) { list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
XE_WARN_ON(xe_vma_is_null(vma)); xe_assert(vm->xe, !xe_vma_is_null(vma));
if (xe_vma_is_userptr(vma)) if (xe_vma_is_userptr(vma))
continue; continue;
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include "regs/xe_gt_regs.h" #include "regs/xe_gt_regs.h"
#include "regs/xe_lrc_layout.h" #include "regs/xe_lrc_layout.h"
#include "regs/xe_regs.h" #include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h" #include "xe_bo.h"
#include "xe_device.h" #include "xe_device.h"
#include "xe_exec_queue.h" #include "xe_exec_queue.h"
...@@ -50,10 +51,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, ...@@ -50,10 +51,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
lrc_desc = xe_lrc_descriptor(lrc); lrc_desc = xe_lrc_descriptor(lrc);
if (GRAPHICS_VERx100(xe) >= 1250) { if (GRAPHICS_VERx100(xe) >= 1250) {
XE_WARN_ON(!FIELD_FIT(XEHP_SW_CTX_ID, ctx_id)); xe_gt_assert(hwe->gt, FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id); lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id);
} else { } else {
XE_WARN_ON(!FIELD_FIT(GEN11_SW_CTX_ID, ctx_id)); xe_gt_assert(hwe->gt, FIELD_FIT(GEN11_SW_CTX_ID, ctx_id));
lrc_desc |= FIELD_PREP(GEN11_SW_CTX_ID, ctx_id); lrc_desc |= FIELD_PREP(GEN11_SW_CTX_ID, ctx_id);
} }
...@@ -321,7 +322,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q) ...@@ -321,7 +322,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
struct xe_device *xe = gt_to_xe(q->gt); struct xe_device *xe = gt_to_xe(q->gt);
int err; int err;
XE_WARN_ON(xe_device_guc_submission_enabled(xe)); xe_assert(xe, !xe_device_guc_submission_enabled(xe));
drm_info(&xe->drm, "Enabling execlist submission (GuC submission disabled)\n"); drm_info(&xe->drm, "Enabling execlist submission (GuC submission disabled)\n");
...@@ -367,9 +368,10 @@ static void execlist_exec_queue_fini_async(struct work_struct *w) ...@@ -367,9 +368,10 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
container_of(w, struct xe_execlist_exec_queue, fini_async); container_of(w, struct xe_execlist_exec_queue, fini_async);
struct xe_exec_queue *q = ee->q; struct xe_exec_queue *q = ee->q;
struct xe_execlist_exec_queue *exl = q->execlist; struct xe_execlist_exec_queue *exl = q->execlist;
struct xe_device *xe = gt_to_xe(q->gt);
unsigned long flags; unsigned long flags;
XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(q->gt))); xe_assert(xe, !xe_device_guc_submission_enabled(xe));
spin_lock_irqsave(&exl->port->lock, flags); spin_lock_irqsave(&exl->port->lock, flags);
if (WARN_ON(exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET)) if (WARN_ON(exl->active_priority != XE_EXEC_QUEUE_PRIORITY_UNSET))
...@@ -377,7 +379,7 @@ static void execlist_exec_queue_fini_async(struct work_struct *w) ...@@ -377,7 +379,7 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
spin_unlock_irqrestore(&exl->port->lock, flags); spin_unlock_irqrestore(&exl->port->lock, flags);
if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT) if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q); xe_device_remove_persistent_exec_queues(xe, q);
drm_sched_entity_fini(&exl->entity); drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched); drm_sched_fini(&exl->sched);
kfree(exl); kfree(exl);
......
...@@ -45,7 +45,7 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) ...@@ -45,7 +45,7 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
mutex_init(&fw->lock); mutex_init(&fw->lock);
/* Assuming gen11+ so assert this assumption is correct */ /* Assuming gen11+ so assert this assumption is correct */
XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
if (xe->info.graphics_verx100 >= 1270) { if (xe->info.graphics_verx100 >= 1270) {
domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT], domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
...@@ -67,7 +67,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) ...@@ -67,7 +67,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
int i, j; int i, j;
/* Assuming gen11+ so assert this assumption is correct */ /* Assuming gen11+ so assert this assumption is correct */
XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
if (!xe_gt_is_media_type(gt)) if (!xe_gt_is_media_type(gt))
domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER], domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
......
...@@ -6,8 +6,8 @@ ...@@ -6,8 +6,8 @@
#ifndef _XE_FORCE_WAKE_H_ #ifndef _XE_FORCE_WAKE_H_
#define _XE_FORCE_WAKE_H_ #define _XE_FORCE_WAKE_H_
#include "xe_assert.h"
#include "xe_force_wake_types.h" #include "xe_force_wake_types.h"
#include "xe_macros.h"
struct xe_gt; struct xe_gt;
...@@ -24,7 +24,7 @@ static inline int ...@@ -24,7 +24,7 @@ static inline int
xe_force_wake_ref(struct xe_force_wake *fw, xe_force_wake_ref(struct xe_force_wake *fw,
enum xe_force_wake_domains domain) enum xe_force_wake_domains domain)
{ {
XE_WARN_ON(!domain); xe_gt_assert(fw->gt, domain);
return fw->domains[ffs(domain) - 1].ref; return fw->domains[ffs(domain) - 1].ref;
} }
...@@ -32,7 +32,7 @@ static inline void ...@@ -32,7 +32,7 @@ static inline void
xe_force_wake_assert_held(struct xe_force_wake *fw, xe_force_wake_assert_held(struct xe_force_wake *fw,
enum xe_force_wake_domains domain) enum xe_force_wake_domains domain)
{ {
XE_WARN_ON(!(fw->awake_domains & domain)); xe_gt_assert(fw->gt, fw->awake_domains & domain);
} }
#endif #endif
...@@ -58,8 +58,8 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev) ...@@ -58,8 +58,8 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte) void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
{ {
XE_WARN_ON(addr & XE_PTE_MASK); xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
XE_WARN_ON(addr >= ggtt->size); xe_tile_assert(ggtt->tile, addr < ggtt->size);
writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]); writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]);
} }
...@@ -69,7 +69,7 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size) ...@@ -69,7 +69,7 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
u64 end = start + size - 1; u64 end = start + size - 1;
u64 scratch_pte; u64 scratch_pte;
XE_WARN_ON(start >= end); xe_tile_assert(ggtt->tile, start < end);
if (ggtt->scratch) if (ggtt->scratch)
scratch_pte = xe_ggtt_pte_encode(ggtt->scratch, 0); scratch_pte = xe_ggtt_pte_encode(ggtt->scratch, 0);
...@@ -230,7 +230,7 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt) ...@@ -230,7 +230,7 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
int seqno; int seqno;
seqno = xe_gt_tlb_invalidation_guc(gt); seqno = xe_gt_tlb_invalidation_guc(gt);
XE_WARN_ON(seqno <= 0); xe_gt_assert(gt, seqno > 0);
if (seqno > 0) if (seqno > 0)
xe_gt_tlb_invalidation_wait(gt, seqno); xe_gt_tlb_invalidation_wait(gt, seqno);
} else if (xe_device_guc_submission_enabled(gt_to_xe(gt))) { } else if (xe_device_guc_submission_enabled(gt_to_xe(gt))) {
...@@ -266,7 +266,7 @@ void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix) ...@@ -266,7 +266,7 @@ void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) { for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
unsigned int i = addr / XE_PAGE_SIZE; unsigned int i = addr / XE_PAGE_SIZE;
XE_WARN_ON(addr > U32_MAX); xe_tile_assert(ggtt->tile, addr <= U32_MAX);
if (ggtt->gsm[i] == scratch_pte) if (ggtt->gsm[i] == scratch_pte)
continue; continue;
...@@ -315,7 +315,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, ...@@ -315,7 +315,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (XE_WARN_ON(bo->ggtt_node.size)) { if (XE_WARN_ON(bo->ggtt_node.size)) {
/* Someone's already inserted this BO in the GGTT */ /* Someone's already inserted this BO in the GGTT */
XE_WARN_ON(bo->ggtt_node.size != bo->size); xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
return 0; return 0;
} }
...@@ -378,7 +378,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) ...@@ -378,7 +378,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
return; return;
/* This BO is not currently in the GGTT */ /* This BO is not currently in the GGTT */
XE_WARN_ON(bo->ggtt_node.size != bo->size); xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
xe_ggtt_remove_node(ggtt, &bo->ggtt_node); xe_ggtt_remove_node(ggtt, &bo->ggtt_node);
} }
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <drm/xe_drm.h> #include <drm/xe_drm.h>
#include "regs/xe_gt_regs.h" #include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h" #include "xe_bb.h"
#include "xe_bo.h" #include "xe_bo.h"
#include "xe_device.h" #include "xe_device.h"
......
...@@ -58,7 +58,7 @@ int xe_gt_clock_init(struct xe_gt *gt) ...@@ -58,7 +58,7 @@ int xe_gt_clock_init(struct xe_gt *gt)
u32 freq = 0; u32 freq = 0;
/* Assuming gen11+ so assert this assumption is correct */ /* Assuming gen11+ so assert this assumption is correct */
XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) { if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(gt); freq = read_reference_ts_freq(gt);
......
...@@ -158,7 +158,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt) ...@@ -158,7 +158,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
char name[8]; char name[8];
int i; int i;
XE_WARN_ON(!minor->debugfs_root); xe_gt_assert(gt, minor->debugfs_root);
sprintf(name, "gt%d", gt->info.id); sprintf(name, "gt%d", gt->info.id);
root = debugfs_create_dir(name, minor->debugfs_root); root = debugfs_create_dir(name, minor->debugfs_root);
......
...@@ -250,7 +250,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, ...@@ -250,7 +250,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
u32 action[MAX_TLB_INVALIDATION_LEN]; u32 action[MAX_TLB_INVALIDATION_LEN];
int len = 0; int len = 0;
XE_WARN_ON(!vma); xe_gt_assert(gt, vma);
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION; action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */ action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
...@@ -288,10 +288,10 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, ...@@ -288,10 +288,10 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
start = ALIGN_DOWN(xe_vma_start(vma), length); start = ALIGN_DOWN(xe_vma_start(vma), length);
} }
XE_WARN_ON(length < SZ_4K); xe_gt_assert(gt, length >= SZ_4K);
XE_WARN_ON(!is_power_of_2(length)); xe_gt_assert(gt, is_power_of_2(length));
XE_WARN_ON(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)); xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)));
XE_WARN_ON(!IS_ALIGNED(start, length)); xe_gt_assert(gt, IS_ALIGNED(start, length));
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE); action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
action[len++] = xe_vma_vm(vma)->usm.asid; action[len++] = xe_vma_vm(vma)->usm.asid;
...@@ -300,7 +300,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, ...@@ -300,7 +300,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
action[len++] = ilog2(length) - ilog2(SZ_4K); action[len++] = ilog2(length) - ilog2(SZ_4K);
} }
XE_WARN_ON(len > MAX_TLB_INVALIDATION_LEN); xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
return send_tlb_invalidation(&gt->uc.guc, fence, action, len); return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
} }
......
...@@ -44,11 +44,12 @@ guc_to_xe(struct xe_guc *guc) ...@@ -44,11 +44,12 @@ guc_to_xe(struct xe_guc *guc)
static u32 guc_bo_ggtt_addr(struct xe_guc *guc, static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
struct xe_bo *bo) struct xe_bo *bo)
{ {
struct xe_device *xe = guc_to_xe(guc);
u32 addr = xe_bo_ggtt_addr(bo); u32 addr = xe_bo_ggtt_addr(bo);
XE_WARN_ON(addr < xe_wopcm_size(guc_to_xe(guc))); xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
XE_WARN_ON(addr >= GUC_GGTT_TOP); xe_assert(xe, addr < GUC_GGTT_TOP);
XE_WARN_ON(bo->size > GUC_GGTT_TOP - addr); xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);
return addr; return addr;
} }
...@@ -629,13 +630,13 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request, ...@@ -629,13 +630,13 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT); BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
XE_WARN_ON(guc->ct.enabled); xe_assert(xe, !guc->ct.enabled);
XE_WARN_ON(!len); xe_assert(xe, len);
XE_WARN_ON(len > VF_SW_FLAG_COUNT); xe_assert(xe, len <= VF_SW_FLAG_COUNT);
XE_WARN_ON(len > MED_VF_SW_FLAG_COUNT); xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
XE_WARN_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
GUC_HXG_ORIGIN_HOST); GUC_HXG_ORIGIN_HOST);
XE_WARN_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
GUC_HXG_TYPE_REQUEST); GUC_HXG_TYPE_REQUEST);
retry: retry:
...@@ -727,6 +728,7 @@ int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len) ...@@ -727,6 +728,7 @@ int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val) static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{ {
struct xe_device *xe = guc_to_xe(guc);
u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = { u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
...@@ -741,8 +743,8 @@ static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val) ...@@ -741,8 +743,8 @@ static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
}; };
int ret; int ret;
XE_WARN_ON(len > 2); xe_assert(xe, len <= 2);
XE_WARN_ON(len == 1 && upper_32_bits(val)); xe_assert(xe, len != 1 || !upper_32_bits(val));
/* Self config must go over MMIO */ /* Self config must go over MMIO */
ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request)); ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
......
...@@ -118,7 +118,9 @@ struct __guc_ads_blob { ...@@ -118,7 +118,9 @@ struct __guc_ads_blob {
static size_t guc_ads_regset_size(struct xe_guc_ads *ads) static size_t guc_ads_regset_size(struct xe_guc_ads *ads)
{ {
XE_WARN_ON(!ads->regset_size); struct xe_device *xe = ads_to_xe(ads);
xe_assert(xe, ads->regset_size);
return ads->regset_size; return ads->regset_size;
} }
...@@ -309,13 +311,13 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads) ...@@ -309,13 +311,13 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
struct xe_gt *gt = ads_to_gt(ads); struct xe_gt *gt = ads_to_gt(ads);
u32 prev_regset_size = ads->regset_size; u32 prev_regset_size = ads->regset_size;
XE_WARN_ON(!ads->bo); xe_gt_assert(gt, ads->bo);
ads->golden_lrc_size = calculate_golden_lrc_size(ads); ads->golden_lrc_size = calculate_golden_lrc_size(ads);
ads->regset_size = calculate_regset_size(gt); ads->regset_size = calculate_regset_size(gt);
XE_WARN_ON(ads->golden_lrc_size + xe_gt_assert(gt, ads->golden_lrc_size +
(ads->regset_size - prev_regset_size) > (ads->regset_size - prev_regset_size) <=
MAX_GOLDEN_LRC_SIZE); MAX_GOLDEN_LRC_SIZE);
return 0; return 0;
...@@ -517,7 +519,7 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads) ...@@ -517,7 +519,7 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
regset_used += count * sizeof(struct guc_mmio_reg); regset_used += count * sizeof(struct guc_mmio_reg);
} }
XE_WARN_ON(regset_used > ads->regset_size); xe_gt_assert(gt, regset_used <= ads->regset_size);
} }
static void guc_um_init_params(struct xe_guc_ads *ads) static void guc_um_init_params(struct xe_guc_ads *ads)
...@@ -572,7 +574,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads) ...@@ -572,7 +574,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
offsetof(struct __guc_ads_blob, system_info)); offsetof(struct __guc_ads_blob, system_info));
u32 base = xe_bo_ggtt_addr(ads->bo); u32 base = xe_bo_ggtt_addr(ads->bo);
XE_WARN_ON(!ads->bo); xe_gt_assert(gt, ads->bo);
xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
guc_policies_init(ads); guc_policies_init(ads);
...@@ -596,7 +598,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads) ...@@ -596,7 +598,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
offsetof(struct __guc_ads_blob, system_info)); offsetof(struct __guc_ads_blob, system_info));
u32 base = xe_bo_ggtt_addr(ads->bo); u32 base = xe_bo_ggtt_addr(ads->bo);
XE_WARN_ON(!ads->bo); xe_gt_assert(gt, ads->bo);
xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
guc_policies_init(ads); guc_policies_init(ads);
...@@ -643,7 +645,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads) ...@@ -643,7 +645,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
engine_enabled_masks[guc_class])) engine_enabled_masks[guc_class]))
continue; continue;
XE_WARN_ON(!gt->default_lrc[class]); xe_gt_assert(gt, gt->default_lrc[class]);
real_size = xe_lrc_size(xe, class); real_size = xe_lrc_size(xe, class);
alloc_size = PAGE_ALIGN(real_size); alloc_size = PAGE_ALIGN(real_size);
...@@ -672,7 +674,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads) ...@@ -672,7 +674,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
offset += alloc_size; offset += alloc_size;
} }
XE_WARN_ON(total_size != ads->golden_lrc_size); xe_gt_assert(gt, total_size == ads->golden_lrc_size);
} }
void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads) void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
......
...@@ -135,7 +135,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) ...@@ -135,7 +135,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
struct xe_bo *bo; struct xe_bo *bo;
int err; int err;
XE_WARN_ON(guc_ct_size() % PAGE_SIZE); xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));
mutex_init(&ct->lock); mutex_init(&ct->lock);
spin_lock_init(&ct->fast_lock); spin_lock_init(&ct->fast_lock);
...@@ -283,7 +283,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) ...@@ -283,7 +283,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
struct xe_device *xe = ct_to_xe(ct); struct xe_device *xe = ct_to_xe(ct);
int err; int err;
XE_WARN_ON(ct->enabled); xe_assert(xe, !ct->enabled);
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
...@@ -376,7 +376,7 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len) ...@@ -376,7 +376,7 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{ {
XE_WARN_ON(g2h_len > ct->ctbs.g2h.info.space); xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);
if (g2h_len) { if (g2h_len) {
lockdep_assert_held(&ct->fast_lock); lockdep_assert_held(&ct->fast_lock);
...@@ -389,7 +389,7 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) ...@@ -389,7 +389,7 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{ {
lockdep_assert_held(&ct->fast_lock); lockdep_assert_held(&ct->fast_lock);
XE_WARN_ON(ct->ctbs.g2h.info.space + g2h_len > xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space); ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
ct->ctbs.g2h.info.space += g2h_len; ct->ctbs.g2h.info.space += g2h_len;
...@@ -419,8 +419,8 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, ...@@ -419,8 +419,8 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
full_len = len + GUC_CTB_HDR_LEN; full_len = len + GUC_CTB_HDR_LEN;
lockdep_assert_held(&ct->lock); lockdep_assert_held(&ct->lock);
XE_WARN_ON(full_len > (GUC_CTB_MSG_MAX_LEN - GUC_CTB_HDR_LEN)); xe_assert(xe, full_len <= (GUC_CTB_MSG_MAX_LEN - GUC_CTB_HDR_LEN));
XE_WARN_ON(tail > h2g->info.size); xe_assert(xe, tail <= h2g->info.size);
/* Command will wrap, zero fill (NOPs), return and check credits again */ /* Command will wrap, zero fill (NOPs), return and check credits again */
if (tail + full_len > h2g->info.size) { if (tail + full_len > h2g->info.size) {
...@@ -476,12 +476,13 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, ...@@ -476,12 +476,13 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h, u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence) struct g2h_fence *g2h_fence)
{ {
struct xe_device *xe = ct_to_xe(ct);
int ret; int ret;
XE_WARN_ON(g2h_len && g2h_fence); xe_assert(xe, !g2h_len || !g2h_fence);
XE_WARN_ON(num_g2h && g2h_fence); xe_assert(xe, !num_g2h || !g2h_fence);
XE_WARN_ON(g2h_len && !num_g2h); xe_assert(xe, !g2h_len || num_g2h);
XE_WARN_ON(!g2h_len && num_g2h); xe_assert(xe, g2h_len || !num_g2h);
lockdep_assert_held(&ct->lock); lockdep_assert_held(&ct->lock);
if (unlikely(ct->ctbs.h2g.info.broken)) { if (unlikely(ct->ctbs.h2g.info.broken)) {
...@@ -552,7 +553,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, ...@@ -552,7 +553,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
unsigned int sleep_period_ms = 1; unsigned int sleep_period_ms = 1;
int ret; int ret;
XE_WARN_ON(g2h_len && g2h_fence); xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
lockdep_assert_held(&ct->lock); lockdep_assert_held(&ct->lock);
xe_device_assert_mem_access(ct_to_xe(ct)); xe_device_assert_mem_access(ct_to_xe(ct));
...@@ -622,7 +623,7 @@ static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len, ...@@ -622,7 +623,7 @@ static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
{ {
int ret; int ret;
XE_WARN_ON(g2h_len && g2h_fence); xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
mutex_lock(&ct->lock); mutex_lock(&ct->lock);
ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence); ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
...@@ -798,7 +799,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) ...@@ -798,7 +799,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
return 0; return 0;
} }
XE_WARN_ON(fence != g2h_fence->seqno); xe_assert(xe, fence == g2h_fence->seqno);
if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) { if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
g2h_fence->fail = true; g2h_fence->fail = true;
......
...@@ -55,12 +55,12 @@ void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p) ...@@ -55,12 +55,12 @@ void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
size_t size; size_t size;
int i, j; int i, j;
XE_WARN_ON(!log->bo); xe_assert(xe, log->bo);
size = log->bo->size; size = log->bo->size;
#define DW_PER_READ 128 #define DW_PER_READ 128
XE_WARN_ON(size % (DW_PER_READ * sizeof(u32))); xe_assert(xe, !(size % (DW_PER_READ * sizeof(u32))));
for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) { for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) {
u32 read[DW_PER_READ]; u32 read[DW_PER_READ];
......
...@@ -816,7 +816,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) ...@@ -816,7 +816,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
int ret; int ret;
XE_WARN_ON(!xe_device_guc_submission_enabled(xe)); xe_gt_assert(gt, xe_device_guc_submission_enabled(xe));
xe_device_mem_access_get(pc_to_xe(pc)); xe_device_mem_access_get(pc_to_xe(pc));
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <drm/drm_managed.h> #include <drm/drm_managed.h>
#include "regs/xe_lrc_layout.h" #include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
#include "xe_devcoredump.h" #include "xe_devcoredump.h"
#include "xe_device.h" #include "xe_device.h"
#include "xe_exec_queue.h" #include "xe_exec_queue.h"
...@@ -354,11 +355,12 @@ static const int xe_exec_queue_prio_to_guc[] = { ...@@ -354,11 +355,12 @@ static const int xe_exec_queue_prio_to_guc[] = {
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{ {
struct exec_queue_policy policy; struct exec_queue_policy policy;
struct xe_device *xe = guc_to_xe(guc);
enum xe_exec_queue_priority prio = q->priority; enum xe_exec_queue_priority prio = q->priority;
u32 timeslice_us = q->sched_props.timeslice_us; u32 timeslice_us = q->sched_props.timeslice_us;
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us; u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
XE_WARN_ON(!exec_queue_registered(q)); xe_assert(xe, exec_queue_registered(q));
__guc_exec_queue_policy_start_klv(&policy, q->guc->id); __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]); __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
...@@ -392,11 +394,12 @@ static void __register_mlrc_engine(struct xe_guc *guc, ...@@ -392,11 +394,12 @@ static void __register_mlrc_engine(struct xe_guc *guc,
struct guc_ctxt_registration_info *info) struct guc_ctxt_registration_info *info)
{ {
#define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2) #define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
struct xe_device *xe = guc_to_xe(guc);
u32 action[MAX_MLRC_REG_SIZE]; u32 action[MAX_MLRC_REG_SIZE];
int len = 0; int len = 0;
int i; int i;
XE_WARN_ON(!xe_exec_queue_is_parallel(q)); xe_assert(xe, xe_exec_queue_is_parallel(q));
action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
action[len++] = info->flags; action[len++] = info->flags;
...@@ -419,7 +422,7 @@ static void __register_mlrc_engine(struct xe_guc *guc, ...@@ -419,7 +422,7 @@ static void __register_mlrc_engine(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc)); action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
} }
XE_WARN_ON(len > MAX_MLRC_REG_SIZE); xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE #undef MAX_MLRC_REG_SIZE
xe_guc_ct_send(&guc->ct, action, len, 0, 0); xe_guc_ct_send(&guc->ct, action, len, 0, 0);
...@@ -453,7 +456,7 @@ static void register_engine(struct xe_exec_queue *q) ...@@ -453,7 +456,7 @@ static void register_engine(struct xe_exec_queue *q)
struct xe_lrc *lrc = q->lrc; struct xe_lrc *lrc = q->lrc;
struct guc_ctxt_registration_info info; struct guc_ctxt_registration_info info;
XE_WARN_ON(exec_queue_registered(q)); xe_assert(xe, !exec_queue_registered(q));
memset(&info, 0, sizeof(info)); memset(&info, 0, sizeof(info));
info.context_idx = q->guc->id; info.context_idx = q->guc->id;
...@@ -543,7 +546,7 @@ static int wq_noop_append(struct xe_exec_queue *q) ...@@ -543,7 +546,7 @@ static int wq_noop_append(struct xe_exec_queue *q)
if (wq_wait_for_space(q, wq_space_until_wrap(q))) if (wq_wait_for_space(q, wq_space_until_wrap(q)))
return -ENODEV; return -ENODEV;
XE_WARN_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));
parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)], parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) | FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
...@@ -583,13 +586,13 @@ static void wq_item_append(struct xe_exec_queue *q) ...@@ -583,13 +586,13 @@ static void wq_item_append(struct xe_exec_queue *q)
wqi[i++] = lrc->ring.tail / sizeof(u64); wqi[i++] = lrc->ring.tail / sizeof(u64);
} }
XE_WARN_ON(i != wqi_size / sizeof(u32)); xe_assert(xe, i == wqi_size / sizeof(u32));
iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch, iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
wq[q->guc->wqi_tail / sizeof(u32)])); wq[q->guc->wqi_tail / sizeof(u32)]));
xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size); xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
q->guc->wqi_tail += wqi_size; q->guc->wqi_tail += wqi_size;
XE_WARN_ON(q->guc->wqi_tail > WQ_SIZE); xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
xe_device_wmb(xe); xe_device_wmb(xe);
...@@ -601,6 +604,7 @@ static void wq_item_append(struct xe_exec_queue *q) ...@@ -601,6 +604,7 @@ static void wq_item_append(struct xe_exec_queue *q)
static void submit_exec_queue(struct xe_exec_queue *q) static void submit_exec_queue(struct xe_exec_queue *q)
{ {
struct xe_guc *guc = exec_queue_to_guc(q); struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
struct xe_lrc *lrc = q->lrc; struct xe_lrc *lrc = q->lrc;
u32 action[3]; u32 action[3];
u32 g2h_len = 0; u32 g2h_len = 0;
...@@ -608,7 +612,7 @@ static void submit_exec_queue(struct xe_exec_queue *q) ...@@ -608,7 +612,7 @@ static void submit_exec_queue(struct xe_exec_queue *q)
int len = 0; int len = 0;
bool extra_submit = false; bool extra_submit = false;
XE_WARN_ON(!exec_queue_registered(q)); xe_assert(xe, exec_queue_registered(q));
if (xe_exec_queue_is_parallel(q)) if (xe_exec_queue_is_parallel(q))
wq_item_append(q); wq_item_append(q);
...@@ -654,10 +658,12 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job) ...@@ -654,10 +658,12 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
{ {
struct xe_sched_job *job = to_xe_sched_job(drm_job); struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q; struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
bool lr = xe_exec_queue_is_lr(q); bool lr = xe_exec_queue_is_lr(q);
XE_WARN_ON((exec_queue_destroyed(q) || exec_queue_pending_disable(q)) && xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
!exec_queue_banned(q) && !exec_queue_suspended(q)); exec_queue_banned(q) || exec_queue_suspended(q));
trace_xe_sched_job_run(job); trace_xe_sched_job_run(job);
...@@ -799,7 +805,7 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) ...@@ -799,7 +805,7 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
struct xe_device *xe = guc_to_xe(guc); struct xe_device *xe = guc_to_xe(guc);
struct xe_gpu_scheduler *sched = &ge->sched; struct xe_gpu_scheduler *sched = &ge->sched;
XE_WARN_ON(!xe_exec_queue_is_lr(q)); xe_assert(xe, xe_exec_queue_is_lr(q));
trace_xe_exec_queue_lr_cleanup(q); trace_xe_exec_queue_lr_cleanup(q);
/* Kill the run_job / process_msg entry points */ /* Kill the run_job / process_msg entry points */
...@@ -853,8 +859,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) ...@@ -853,8 +859,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int i = 0; int i = 0;
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) { if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_KERNEL); xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)); xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)));
drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx", drm_notice(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
xe_sched_job_seqno(job), q->guc->id, q->flags); xe_sched_job_seqno(job), q->guc->id, q->flags);
...@@ -990,8 +996,9 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg) ...@@ -990,8 +996,9 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{ {
struct xe_exec_queue *q = msg->private_data; struct xe_exec_queue *q = msg->private_data;
struct xe_guc *guc = exec_queue_to_guc(q); struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_PERMANENT); xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
trace_xe_exec_queue_cleanup_entity(q); trace_xe_exec_queue_cleanup_entity(q);
if (exec_queue_registered(q)) if (exec_queue_registered(q))
...@@ -1018,10 +1025,11 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms ...@@ -1018,10 +1025,11 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
static void suspend_fence_signal(struct xe_exec_queue *q) static void suspend_fence_signal(struct xe_exec_queue *q)
{ {
struct xe_guc *guc = exec_queue_to_guc(q); struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
XE_WARN_ON(!exec_queue_suspended(q) && !exec_queue_killed(q) && xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
!guc_read_stopped(guc)); guc_read_stopped(guc));
XE_WARN_ON(!q->guc->suspend_pending); xe_assert(xe, q->guc->suspend_pending);
q->guc->suspend_pending = false; q->guc->suspend_pending = false;
smp_wmb(); smp_wmb();
...@@ -1125,11 +1133,12 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) ...@@ -1125,11 +1133,12 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
{ {
struct xe_gpu_scheduler *sched; struct xe_gpu_scheduler *sched;
struct xe_guc *guc = exec_queue_to_guc(q); struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
struct xe_guc_exec_queue *ge; struct xe_guc_exec_queue *ge;
long timeout; long timeout;
int err; int err;
XE_WARN_ON(!xe_device_guc_submission_enabled(guc_to_xe(guc))); xe_assert(xe, xe_device_guc_submission_enabled(guc_to_xe(guc)));
ge = kzalloc(sizeof(*ge), GFP_KERNEL); ge = kzalloc(sizeof(*ge), GFP_KERNEL);
if (!ge) if (!ge)
...@@ -1275,10 +1284,12 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, ...@@ -1275,10 +1284,12 @@ static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms) static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms)
{ {
struct xe_gpu_scheduler *sched = &q->guc->sched; struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
XE_WARN_ON(exec_queue_registered(q)); xe_assert(xe, !exec_queue_registered(q));
XE_WARN_ON(exec_queue_banned(q)); xe_assert(xe, !exec_queue_banned(q));
XE_WARN_ON(exec_queue_killed(q)); xe_assert(xe, !exec_queue_killed(q));
sched->base.timeout = job_timeout_ms; sched->base.timeout = job_timeout_ms;
...@@ -1309,8 +1320,10 @@ static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q) ...@@ -1309,8 +1320,10 @@ static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
static void guc_exec_queue_resume(struct xe_exec_queue *q) static void guc_exec_queue_resume(struct xe_exec_queue *q)
{ {
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
XE_WARN_ON(q->guc->suspend_pending); xe_assert(xe, !q->guc->suspend_pending);
guc_exec_queue_add_msg(q, msg, RESUME); guc_exec_queue_add_msg(q, msg, RESUME);
} }
...@@ -1405,8 +1418,9 @@ int xe_guc_submit_stop(struct xe_guc *guc) ...@@ -1405,8 +1418,9 @@ int xe_guc_submit_stop(struct xe_guc *guc)
{ {
struct xe_exec_queue *q; struct xe_exec_queue *q;
unsigned long index; unsigned long index;
struct xe_device *xe = guc_to_xe(guc);
XE_WARN_ON(guc_read_stopped(guc) != 1); xe_assert(xe, guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock); mutex_lock(&guc->submission_state.lock);
...@@ -1443,8 +1457,9 @@ int xe_guc_submit_start(struct xe_guc *guc) ...@@ -1443,8 +1457,9 @@ int xe_guc_submit_start(struct xe_guc *guc)
{ {
struct xe_exec_queue *q; struct xe_exec_queue *q;
unsigned long index; unsigned long index;
struct xe_device *xe = guc_to_xe(guc);
XE_WARN_ON(guc_read_stopped(guc) != 1); xe_assert(xe, guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock); mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped); atomic_dec(&guc->submission_state.stopped);
...@@ -1474,7 +1489,7 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id) ...@@ -1474,7 +1489,7 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
return NULL; return NULL;
} }
XE_WARN_ON(q->guc->id != guc_id); xe_assert(xe, q->guc->id == guc_id);
return q; return q;
} }
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include "xe_huc.h" #include "xe_huc.h"
#include "regs/xe_guc_regs.h" #include "regs/xe_guc_regs.h"
#include "xe_assert.h"
#include "xe_bo.h" #include "xe_bo.h"
#include "xe_device.h" #include "xe_device.h"
#include "xe_force_wake.h" #include "xe_force_wake.h"
...@@ -72,7 +73,7 @@ int xe_huc_auth(struct xe_huc *huc) ...@@ -72,7 +73,7 @@ int xe_huc_auth(struct xe_huc *huc)
if (xe_uc_fw_is_disabled(&huc->fw)) if (xe_uc_fw_is_disabled(&huc->fw))
return 0; return 0;
XE_WARN_ON(xe_uc_fw_is_running(&huc->fw)); xe_assert(xe, !xe_uc_fw_is_running(&huc->fw));
if (!xe_uc_fw_is_loaded(&huc->fw)) if (!xe_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC; return -ENOEXEC;
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include "regs/xe_engine_regs.h" #include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h" #include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h" #include "regs/xe_regs.h"
#include "xe_assert.h"
#include "xe_bo.h" #include "xe_bo.h"
#include "xe_device.h" #include "xe_device.h"
#include "xe_execlist.h" #include "xe_execlist.h"
...@@ -244,7 +245,7 @@ static void hw_engine_fini(struct drm_device *drm, void *arg) ...@@ -244,7 +245,7 @@ static void hw_engine_fini(struct drm_device *drm, void *arg)
static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
u32 val) u32 val)
{ {
XE_WARN_ON(reg.addr & hwe->mmio_base); xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain); xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
reg.addr += hwe->mmio_base; reg.addr += hwe->mmio_base;
...@@ -254,7 +255,7 @@ static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, ...@@ -254,7 +255,7 @@ static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg) static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
{ {
XE_WARN_ON(reg.addr & hwe->mmio_base); xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain); xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
reg.addr += hwe->mmio_base; reg.addr += hwe->mmio_base;
...@@ -374,7 +375,7 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe, ...@@ -374,7 +375,7 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
info = &engine_infos[id]; info = &engine_infos[id];
XE_WARN_ON(hwe->gt); xe_gt_assert(gt, !hwe->gt);
hwe->gt = gt; hwe->gt = gt;
hwe->class = info->class; hwe->class = info->class;
...@@ -415,8 +416,8 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, ...@@ -415,8 +416,8 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
struct xe_tile *tile = gt_to_tile(gt); struct xe_tile *tile = gt_to_tile(gt);
int err; int err;
XE_WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name); xe_gt_assert(gt, id < ARRAY_SIZE(engine_infos) && engine_infos[id].name);
XE_WARN_ON(!(gt->info.engine_mask & BIT(id))); xe_gt_assert(gt, gt->info.engine_mask & BIT(id));
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt); xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
xe_reg_sr_apply_whitelist(hwe); xe_reg_sr_apply_whitelist(hwe);
......
...@@ -116,7 +116,7 @@ static void set_offsets(u32 *regs, ...@@ -116,7 +116,7 @@ static void set_offsets(u32 *regs,
*regs |= MI_LRI_LRM_CS_MMIO; *regs |= MI_LRI_LRM_CS_MMIO;
regs++; regs++;
XE_WARN_ON(!count); xe_gt_assert(hwe->gt, count);
do { do {
u32 offset = 0; u32 offset = 0;
u8 v; u8 v;
...@@ -608,7 +608,7 @@ static inline struct iosys_map __xe_lrc_##elem##_map(struct xe_lrc *lrc) \ ...@@ -608,7 +608,7 @@ static inline struct iosys_map __xe_lrc_##elem##_map(struct xe_lrc *lrc) \
{ \ { \
struct iosys_map map = lrc->bo->vmap; \ struct iosys_map map = lrc->bo->vmap; \
\ \
XE_WARN_ON(iosys_map_is_null(&map)); \ xe_assert(lrc_to_xe(lrc), !iosys_map_is_null(&map)); \
iosys_map_incr(&map, __xe_lrc_##elem##_offset(lrc)); \ iosys_map_incr(&map, __xe_lrc_##elem##_offset(lrc)); \
return map; \ return map; \
} \ } \
...@@ -827,16 +827,17 @@ static void __xe_lrc_write_ring(struct xe_lrc *lrc, struct iosys_map ring, ...@@ -827,16 +827,17 @@ static void __xe_lrc_write_ring(struct xe_lrc *lrc, struct iosys_map ring,
void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size) void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size)
{ {
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map ring; struct iosys_map ring;
u32 rhs; u32 rhs;
size_t aligned_size; size_t aligned_size;
XE_WARN_ON(!IS_ALIGNED(size, 4)); xe_assert(xe, IS_ALIGNED(size, 4));
aligned_size = ALIGN(size, 8); aligned_size = ALIGN(size, 8);
ring = __xe_lrc_ring_map(lrc); ring = __xe_lrc_ring_map(lrc);
XE_WARN_ON(lrc->ring.tail >= lrc->ring.size); xe_assert(xe, lrc->ring.tail < lrc->ring.size);
rhs = lrc->ring.size - lrc->ring.tail; rhs = lrc->ring.size - lrc->ring.tail;
if (size > rhs) { if (size > rhs) {
__xe_lrc_write_ring(lrc, ring, data, rhs); __xe_lrc_write_ring(lrc, ring, data, rhs);
......
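xe_lrc_write_ring() above derives the device pointer once via lrc_to_xe() and then checks dword alignment and that the tail is still inside the ring before deciding whether the write must be split at the wrap point. A runnable sketch of that wrap arithmetic, using made-up ring numbers:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative only: the wrap arithmetic xe_lrc_write_ring() guards with
 * its asserts. Ring size, tail and payload size are made-up numbers. */
int main(void)
{
	size_t ring_size = 4096, tail = 4000;   /* hypothetical ring state */
	size_t size = 256;                      /* hypothetical payload size */

	assert(size % 4 == 0);                  /* mirrors IS_ALIGNED(size, 4) */
	assert(tail < ring_size);               /* tail must still be inside the ring */

	size_t rhs = ring_size - tail;          /* room left before the ring wraps */
	if (size > rhs)
		printf("split copy: %zu bytes at tail, %zu more from offset 0\n",
		       rhs, size - rhs);
	else
		printf("single copy of %zu bytes at tail %zu\n", size, tail);
	return 0;
}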
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include "generated/xe_wa_oob.h" #include "generated/xe_wa_oob.h"
#include "regs/xe_gpu_commands.h" #include "regs/xe_gpu_commands.h"
#include "tests/xe_test.h" #include "tests/xe_test.h"
#include "xe_assert.h"
#include "xe_bb.h" #include "xe_bb.h"
#include "xe_bo.h" #include "xe_bo.h"
#include "xe_exec_queue.h" #include "xe_exec_queue.h"
...@@ -172,7 +173,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, ...@@ -172,7 +173,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1)); BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
/* Need to be sure everything fits in the first PT, or create more */ /* Need to be sure everything fits in the first PT, or create more */
XE_WARN_ON(m->batch_base_ofs + batch->size >= SZ_2M); xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M);
bo = xe_bo_create_pin_map(vm->xe, tile, vm, bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE, num_entries * XE_PAGE_SIZE,
...@@ -206,7 +207,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, ...@@ -206,7 +207,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
} }
if (!IS_DGFX(xe)) { if (!IS_DGFX(xe)) {
XE_WARN_ON(xe->info.supports_usm); xe_tile_assert(tile, !xe->info.supports_usm);
/* Write out batch too */ /* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
...@@ -487,7 +488,7 @@ static void emit_pte(struct xe_migrate *m, ...@@ -487,7 +488,7 @@ static void emit_pte(struct xe_migrate *m,
/* Is this a 64K PTE entry? */ /* Is this a 64K PTE entry? */
if ((m->q->vm->flags & XE_VM_FLAG_64K) && if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
!(cur_ofs & (16 * 8 - 1))) { !(cur_ofs & (16 * 8 - 1))) {
XE_WARN_ON(!IS_ALIGNED(addr, SZ_64K)); xe_tile_assert(m->tile, IS_ALIGNED(addr, SZ_64K));
addr |= XE_PTE_PS64; addr |= XE_PTE_PS64;
} }
...@@ -516,7 +517,7 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb, ...@@ -516,7 +517,7 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size), num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
NUM_CCS_BYTES_PER_BLOCK); NUM_CCS_BYTES_PER_BLOCK);
XE_WARN_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER); xe_gt_assert(gt, num_ccs_blks <= NUM_CCS_BLKS_PER_XFER);
*cs++ = XY_CTRL_SURF_COPY_BLT | *cs++ = XY_CTRL_SURF_COPY_BLT |
(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT | (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT | (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
...@@ -536,9 +537,9 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, ...@@ -536,9 +537,9 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
u64 src_ofs, u64 dst_ofs, unsigned int size, u64 src_ofs, u64 dst_ofs, unsigned int size,
unsigned int pitch) unsigned int pitch)
{ {
XE_WARN_ON(size / pitch > S16_MAX); xe_gt_assert(gt, size / pitch <= S16_MAX);
XE_WARN_ON(pitch / 4 > S16_MAX); xe_gt_assert(gt, pitch / 4 <= S16_MAX);
XE_WARN_ON(pitch > U16_MAX); xe_gt_assert(gt, pitch <= U16_MAX);
bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2); bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch; bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch;
...@@ -598,7 +599,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m, ...@@ -598,7 +599,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
* At the moment, we don't support copying CCS metadata from * At the moment, we don't support copying CCS metadata from
* system to system. * system to system.
*/ */
XE_WARN_ON(!src_is_vram && !dst_is_vram); xe_gt_assert(gt, src_is_vram || dst_is_vram);
emit_copy_ccs(gt, bb, dst_ofs, dst_is_vram, src_ofs, emit_copy_ccs(gt, bb, dst_ofs, dst_is_vram, src_ofs,
src_is_vram, dst_size); src_is_vram, dst_size);
...@@ -810,7 +811,7 @@ static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs ...@@ -810,7 +811,7 @@ static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs
*cs++ = upper_32_bits(src_ofs); *cs++ = upper_32_bits(src_ofs);
*cs++ = FIELD_PREP(PVC_MS_MOCS_INDEX_MASK, mocs); *cs++ = FIELD_PREP(PVC_MS_MOCS_INDEX_MASK, mocs);
XE_WARN_ON(cs - bb->cs != len + bb->len); xe_gt_assert(gt, cs - bb->cs == len + bb->len);
bb->len += len; bb->len += len;
} }
...@@ -848,7 +849,7 @@ static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb, ...@@ -848,7 +849,7 @@ static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
*cs++ = 0; *cs++ = 0;
} }
XE_WARN_ON(cs - bb->cs != len + bb->len); xe_gt_assert(gt, cs - bb->cs == len + bb->len);
bb->len += len; bb->len += len;
} }
...@@ -1021,9 +1022,9 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, ...@@ -1021,9 +1022,9 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
* PDE. This requires a BO that is almost vm->size big. * PDE. This requires a BO that is almost vm->size big.
* *
* This shouldn't be possible in practice.. might change when 16K * This shouldn't be possible in practice.. might change when 16K
* pages are used. Hence the XE_WARN_ON. * pages are used. Hence the assert.
*/ */
XE_WARN_ON(update->qwords > 0x1ff); xe_tile_assert(tile, update->qwords <= 0x1ff);
if (!ppgtt_ofs) { if (!ppgtt_ofs) {
ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0, ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
XE_PAGE_SIZE)); XE_PAGE_SIZE));
...@@ -1213,7 +1214,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, ...@@ -1213,7 +1214,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
* Worst case: Sum(2 * (each lower level page size) + (top level page size)) * Worst case: Sum(2 * (each lower level page size) + (top level page size))
* Should be reasonably bound.. * Should be reasonably bound..
*/ */
XE_WARN_ON(batch_size >= SZ_128K); xe_tile_assert(tile, batch_size < SZ_128K);
bb = xe_bb_new(gt, batch_size, !q && xe->info.supports_usm); bb = xe_bb_new(gt, batch_size, !q && xe->info.supports_usm);
if (IS_ERR(bb)) if (IS_ERR(bb))
...@@ -1223,7 +1224,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, ...@@ -1223,7 +1224,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
if (!IS_DGFX(xe)) { if (!IS_DGFX(xe)) {
ppgtt_ofs = NUM_KERNEL_PDE - 1; ppgtt_ofs = NUM_KERNEL_PDE - 1;
if (q) { if (q) {
XE_WARN_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT); xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
sa_bo = drm_suballoc_new(&m->vm_update_sa, 1, sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
GFP_KERNEL, true, 0); GFP_KERNEL, true, 0);
...@@ -1252,7 +1253,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, ...@@ -1252,7 +1253,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
for (i = 0; i < num_updates; i++) { for (i = 0; i < num_updates; i++) {
struct xe_bo *pt_bo = updates[i].pt_bo; struct xe_bo *pt_bo = updates[i].pt_bo;
XE_WARN_ON(pt_bo->size != SZ_4K); xe_tile_assert(tile, pt_bo->size == SZ_4K);
addr = xe_pte_encode(pt_bo, 0, XE_CACHE_WB, 0); addr = xe_pte_encode(pt_bo, 0, XE_CACHE_WB, 0);
bb->cs[bb->len++] = lower_32_bits(addr); bb->cs[bb->len++] = lower_32_bits(addr);
......
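The emit_copy() hunk above bounds the copy height (size / pitch) and the pitch before they are packed into XY_FAST_COPY_BLT; judging from the S16_MAX/U16_MAX limits these presumably land in 16-bit command fields. A stand-alone sketch of the same bound checks with made-up size and pitch:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Illustrative only: the field-width reasoning behind the emit_copy()
 * asserts above; the size and pitch values are hypothetical. */
int main(void)
{
	unsigned int size = 2 * 1024 * 1024;    /* hypothetical copy size in bytes */
	unsigned int pitch = 256;               /* hypothetical row pitch in bytes */

	assert(size / pitch <= SHRT_MAX);       /* copy height fits a signed 16-bit field */
	assert(pitch / 4 <= SHRT_MAX);          /* pitch in dwords fits as well */
	assert(pitch <= USHRT_MAX);             /* byte pitch fits an unsigned 16-bit field */

	printf("blit of %u rows, %u bytes per row\n", size / pitch, pitch);
	return 0;
}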
...@@ -463,7 +463,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe, ...@@ -463,7 +463,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
* is still 0 at this point, we'll assume that it was omitted by * is still 0 at this point, we'll assume that it was omitted by
* mistake in the switch statement above. * mistake in the switch statement above.
*/ */
XE_WARN_ON(info->unused_entries_index == 0); xe_assert(xe, info->unused_entries_index != 0);
if (XE_WARN_ON(info->size > info->n_entries)) { if (XE_WARN_ON(info->size > info->n_entries)) {
info->table = NULL; info->table = NULL;
......
...@@ -196,7 +196,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, ...@@ -196,7 +196,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
pt->level = level; pt->level = level;
pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL; pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;
XE_WARN_ON(level > XE_VM_MAX_LEVEL); xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);
return pt; return pt;
...@@ -1004,7 +1004,7 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, ...@@ -1004,7 +1004,7 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
*num_entries = 0; *num_entries = 0;
err = xe_pt_stage_bind(tile, vma, entries, num_entries); err = xe_pt_stage_bind(tile, vma, entries, num_entries);
if (!err) if (!err)
XE_WARN_ON(!*num_entries); xe_tile_assert(tile, *num_entries);
else /* abort! */ else /* abort! */
xe_pt_abort_bind(vma, entries, *num_entries); xe_pt_abort_bind(vma, entries, *num_entries);
...@@ -1026,7 +1026,7 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe, ...@@ -1026,7 +1026,7 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
u64 end; u64 end;
u64 start; u64 start;
XE_WARN_ON(entry->pt->is_compact); xe_assert(xe, !entry->pt->is_compact);
start = entry->ofs * page_size; start = entry->ofs * page_size;
end = start + page_size * entry->qwords; end = start + page_size * entry->qwords;
vm_dbg(&xe->drm, vm_dbg(&xe->drm,
...@@ -1276,7 +1276,7 @@ static int invalidation_fence_init(struct xe_gt *gt, ...@@ -1276,7 +1276,7 @@ static int invalidation_fence_init(struct xe_gt *gt,
dma_fence_put(&ifence->base.base); /* Creation ref */ dma_fence_put(&ifence->base.base); /* Creation ref */
} }
XE_WARN_ON(ret && ret != -ENOENT); xe_gt_assert(gt, !ret || ret == -ENOENT);
return ret && ret != -ENOENT ? ret : 0; return ret && ret != -ENOENT ? ret : 0;
} }
...@@ -1356,7 +1356,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue ...@@ -1356,7 +1356,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind); err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
if (err) if (err)
goto err; goto err;
XE_WARN_ON(num_entries > ARRAY_SIZE(entries)); xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries); xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries, xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
...@@ -1707,7 +1707,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu ...@@ -1707,7 +1707,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
xe_vma_start(vma), xe_vma_end(vma) - 1, q); xe_vma_start(vma), xe_vma_end(vma) - 1, q);
num_entries = xe_pt_stage_unbind(tile, vma, entries); num_entries = xe_pt_stage_unbind(tile, vma, entries);
XE_WARN_ON(num_entries > ARRAY_SIZE(entries)); xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries); xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries, xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
...@@ -1773,7 +1773,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu ...@@ -1773,7 +1773,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
list_del_init(&vma->combined_links.rebind); list_del_init(&vma->combined_links.rebind);
if (unbind_pt_update.locked) { if (unbind_pt_update.locked) {
XE_WARN_ON(!xe_vma_is_userptr(vma)); xe_tile_assert(tile, xe_vma_is_userptr(vma));
if (!vma->tile_present) { if (!vma->tile_present) {
spin_lock(&vm->userptr.invalidated_lock); spin_lock(&vm->userptr.invalidated_lock);
......
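invalidation_fence_init() above tolerates -ENOENT: the assert admits it next to success and the return expression folds it to zero. A minimal stand-alone sketch of that filtering:

#include <assert.h>
#include <errno.h>
#include <stdio.h>

/* Illustrative only: folding a tolerated error code (-ENOENT) to success,
 * as the invalidation-fence path above does after its assert. */
static int filter_enoent(int ret)
{
	assert(!ret || ret == -ENOENT);         /* only 0 or -ENOENT are expected here */
	return ret && ret != -ENOENT ? ret : 0; /* -ENOENT is treated as success */
}

int main(void)
{
	printf("%d %d\n", filter_enoent(0), filter_enoent(-ENOENT)); /* both print 0 */
	return 0;
}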
...@@ -212,6 +212,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc ...@@ -212,6 +212,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
u32 dw[MAX_JOB_SIZE_DW], i = 0; u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job); u32 ppgtt_flag = get_ppgtt_flag(job);
struct xe_vm *vm = job->q->vm; struct xe_vm *vm = job->q->vm;
struct xe_gt *gt = job->q->gt;
if (vm && vm->batch_invalidate_tlb) { if (vm && vm->batch_invalidate_tlb) {
dw[i++] = preparser_disable(true); dw[i++] = preparser_disable(true);
...@@ -234,7 +235,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc ...@@ -234,7 +235,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
i = emit_user_interrupt(dw, i); i = emit_user_interrupt(dw, i);
XE_WARN_ON(i > MAX_JOB_SIZE_DW); xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);
xe_lrc_write_ring(lrc, dw, i * sizeof(*dw)); xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
} }
...@@ -294,7 +295,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, ...@@ -294,7 +295,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
i = emit_user_interrupt(dw, i); i = emit_user_interrupt(dw, i);
XE_WARN_ON(i > MAX_JOB_SIZE_DW); xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);
xe_lrc_write_ring(lrc, dw, i * sizeof(*dw)); xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
} }
...@@ -342,7 +343,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job, ...@@ -342,7 +343,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
i = emit_user_interrupt(dw, i); i = emit_user_interrupt(dw, i);
XE_WARN_ON(i > MAX_JOB_SIZE_DW); xe_gt_assert(gt, i <= MAX_JOB_SIZE_DW);
xe_lrc_write_ring(lrc, dw, i * sizeof(*dw)); xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
} }
...@@ -372,14 +373,16 @@ static void emit_migration_job_gen12(struct xe_sched_job *job, ...@@ -372,14 +373,16 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
i = emit_user_interrupt(dw, i); i = emit_user_interrupt(dw, i);
XE_WARN_ON(i > MAX_JOB_SIZE_DW); xe_gt_assert(job->q->gt, i <= MAX_JOB_SIZE_DW);
xe_lrc_write_ring(lrc, dw, i * sizeof(*dw)); xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
} }
static void emit_job_gen12_gsc(struct xe_sched_job *job) static void emit_job_gen12_gsc(struct xe_sched_job *job)
{ {
XE_WARN_ON(job->q->width > 1); /* no parallel submission for GSCCS */ struct xe_gt *gt = job->q->gt;
xe_gt_assert(gt, job->q->width <= 1); /* no parallel submission for GSCCS */
__emit_job_gen12_simple(job, job->q->lrc, __emit_job_gen12_simple(job, job->q->lrc,
job->batch_addr[0], job->batch_addr[0],
......
...@@ -143,7 +143,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q, ...@@ -143,7 +143,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
/* Sanity check */ /* Sanity check */
for (j = 0; j < q->width; ++j) for (j = 0; j < q->width; ++j)
XE_WARN_ON(cf->base.seqno != fences[j]->seqno); xe_assert(job_to_xe(job), cf->base.seqno == fences[j]->seqno);
job->fence = &cf->base; job->fence = &cf->base;
} }
......
...@@ -162,7 +162,7 @@ int xe_uc_init_hw(struct xe_uc *uc) ...@@ -162,7 +162,7 @@ int xe_uc_init_hw(struct xe_uc *uc)
/* We don't fail the driver load if HuC fails to auth, but let's warn */ /* We don't fail the driver load if HuC fails to auth, but let's warn */
ret = xe_huc_auth(&uc->huc); ret = xe_huc_auth(&uc->huc);
XE_WARN_ON(ret); xe_gt_assert(uc_to_gt(uc), !ret);
return 0; return 0;
} }
......
...@@ -195,7 +195,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw) ...@@ -195,7 +195,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
u32 count; u32 count;
int i; int i;
XE_WARN_ON(uc_fw->type >= ARRAY_SIZE(blobs_all)); xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all));
entries = blobs_all[uc_fw->type].entries; entries = blobs_all[uc_fw->type].entries;
count = blobs_all[uc_fw->type].count; count = blobs_all[uc_fw->type].count;
...@@ -224,8 +224,8 @@ size_t xe_uc_fw_copy_rsa(struct xe_uc_fw *uc_fw, void *dst, u32 max_len) ...@@ -224,8 +224,8 @@ size_t xe_uc_fw_copy_rsa(struct xe_uc_fw *uc_fw, void *dst, u32 max_len)
struct xe_device *xe = uc_fw_to_xe(uc_fw); struct xe_device *xe = uc_fw_to_xe(uc_fw);
u32 size = min_t(u32, uc_fw->rsa_size, max_len); u32 size = min_t(u32, uc_fw->rsa_size, max_len);
XE_WARN_ON(size % 4); xe_assert(xe, !(size % 4));
XE_WARN_ON(!xe_uc_fw_is_available(uc_fw)); xe_assert(xe, xe_uc_fw_is_available(uc_fw));
xe_map_memcpy_from(xe, dst, &uc_fw->bo->vmap, xe_map_memcpy_from(xe, dst, &uc_fw->bo->vmap,
xe_uc_fw_rsa_offset(uc_fw), size); xe_uc_fw_rsa_offset(uc_fw), size);
...@@ -249,8 +249,8 @@ static void guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css) ...@@ -249,8 +249,8 @@ static void guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
struct xe_gt *gt = uc_fw_to_gt(uc_fw); struct xe_gt *gt = uc_fw_to_gt(uc_fw);
struct xe_guc *guc = &gt->uc.guc; struct xe_guc *guc = &gt->uc.guc;
XE_WARN_ON(uc_fw->type != XE_UC_FW_TYPE_GUC); xe_gt_assert(gt, uc_fw->type == XE_UC_FW_TYPE_GUC);
XE_WARN_ON(uc_fw->major_ver_found < 70); xe_gt_assert(gt, uc_fw->major_ver_found >= 70);
if (uc_fw->major_ver_found > 70 || uc_fw->minor_ver_found >= 6) { if (uc_fw->major_ver_found > 70 || uc_fw->minor_ver_found >= 6) {
/* v70.6.0 adds CSS header support */ /* v70.6.0 adds CSS header support */
...@@ -336,8 +336,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw) ...@@ -336,8 +336,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
* before we're looked at the HW caps to see if we have uc support * before we're looked at the HW caps to see if we have uc support
*/ */
BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED); BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED);
XE_WARN_ON(uc_fw->status); xe_assert(xe, !uc_fw->status);
XE_WARN_ON(uc_fw->path); xe_assert(xe, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw); uc_fw_auto_select(xe, uc_fw);
xe_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ? xe_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
...@@ -504,7 +504,7 @@ int xe_uc_fw_upload(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags) ...@@ -504,7 +504,7 @@ int xe_uc_fw_upload(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
int err; int err;
/* make sure the status was cleared the last time we reset the uc */ /* make sure the status was cleared the last time we reset the uc */
XE_WARN_ON(xe_uc_fw_is_loaded(uc_fw)); xe_assert(xe, !xe_uc_fw_is_loaded(uc_fw));
if (!xe_uc_fw_is_loadable(uc_fw)) if (!xe_uc_fw_is_loadable(uc_fw))
return -ENOEXEC; return -ENOEXEC;
......
...@@ -71,7 +71,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma) ...@@ -71,7 +71,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
bool read_only = xe_vma_read_only(vma); bool read_only = xe_vma_read_only(vma);
lockdep_assert_held(&vm->lock); lockdep_assert_held(&vm->lock);
XE_WARN_ON(!xe_vma_is_userptr(vma)); xe_assert(xe, xe_vma_is_userptr(vma));
retry: retry:
if (vma->gpuva.flags & XE_VMA_DESTROYED) if (vma->gpuva.flags & XE_VMA_DESTROYED)
return 0; return 0;
...@@ -260,7 +260,7 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) ...@@ -260,7 +260,7 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
struct dma_fence *fence; struct dma_fence *fence;
link = list->next; link = list->next;
XE_WARN_ON(link == list); xe_assert(vm->xe, link != list);
fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link), fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
q, q->compute.context, q, q->compute.context,
...@@ -338,7 +338,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) ...@@ -338,7 +338,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
int err; int err;
bool wait; bool wait;
XE_WARN_ON(!xe_vm_in_compute_mode(vm)); xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
down_write(&vm->lock); down_write(&vm->lock);
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT); drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
...@@ -573,7 +573,7 @@ static void preempt_rebind_work_func(struct work_struct *w) ...@@ -573,7 +573,7 @@ static void preempt_rebind_work_func(struct work_struct *w)
long wait; long wait;
int __maybe_unused tries = 0; int __maybe_unused tries = 0;
XE_WARN_ON(!xe_vm_in_compute_mode(vm)); xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
trace_xe_vm_rebind_worker_enter(vm); trace_xe_vm_rebind_worker_enter(vm);
down_write(&vm->lock); down_write(&vm->lock);
...@@ -698,7 +698,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, ...@@ -698,7 +698,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
struct dma_fence *fence; struct dma_fence *fence;
long err; long err;
XE_WARN_ON(!xe_vma_is_userptr(vma)); xe_assert(vm->xe, xe_vma_is_userptr(vma));
trace_xe_vma_userptr_invalidate(vma); trace_xe_vma_userptr_invalidate(vma);
if (!mmu_notifier_range_blockable(range)) if (!mmu_notifier_range_blockable(range))
...@@ -839,7 +839,7 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) ...@@ -839,7 +839,7 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
xe_vm_assert_held(vm); xe_vm_assert_held(vm);
list_for_each_entry_safe(vma, next, &vm->rebind_list, list_for_each_entry_safe(vma, next, &vm->rebind_list,
combined_links.rebind) { combined_links.rebind) {
XE_WARN_ON(!vma->tile_present); xe_assert(vm->xe, vma->tile_present);
list_del_init(&vma->combined_links.rebind); list_del_init(&vma->combined_links.rebind);
dma_fence_put(fence); dma_fence_put(fence);
...@@ -867,8 +867,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, ...@@ -867,8 +867,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_tile *tile; struct xe_tile *tile;
u8 id; u8 id;
XE_WARN_ON(start >= end); xe_assert(vm->xe, start < end);
XE_WARN_ON(end >= vm->size); xe_assert(vm->xe, end < vm->size);
if (!bo && !is_null) /* userptr */ if (!bo && !is_null) /* userptr */
vma = kzalloc(sizeof(*vma), GFP_KERNEL); vma = kzalloc(sizeof(*vma), GFP_KERNEL);
...@@ -1064,10 +1064,10 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence) ...@@ -1064,10 +1064,10 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
struct xe_vm *vm = xe_vma_vm(vma); struct xe_vm *vm = xe_vma_vm(vma);
lockdep_assert_held_write(&vm->lock); lockdep_assert_held_write(&vm->lock);
XE_WARN_ON(!list_empty(&vma->combined_links.destroy)); xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));
if (xe_vma_is_userptr(vma)) { if (xe_vma_is_userptr(vma)) {
XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED)); xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock); spin_lock(&vm->userptr.invalidated_lock);
list_del(&vma->userptr.invalidate_link); list_del(&vma->userptr.invalidate_link);
...@@ -1160,7 +1160,7 @@ xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) ...@@ -1160,7 +1160,7 @@ xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range)
if (xe_vm_is_closed_or_banned(vm)) if (xe_vm_is_closed_or_banned(vm))
return NULL; return NULL;
XE_WARN_ON(start + range > vm->size); xe_assert(vm->xe, start + range <= vm->size);
gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range);
...@@ -1171,7 +1171,7 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) ...@@ -1171,7 +1171,7 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
{ {
int err; int err;
XE_WARN_ON(xe_vma_vm(vma) != vm); xe_assert(vm->xe, xe_vma_vm(vma) == vm);
lockdep_assert_held(&vm->lock); lockdep_assert_held(&vm->lock);
err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva);
...@@ -1182,7 +1182,7 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) ...@@ -1182,7 +1182,7 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma)
static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma)
{ {
XE_WARN_ON(xe_vma_vm(vma) != vm); xe_assert(vm->xe, xe_vma_vm(vma) == vm);
lockdep_assert_held(&vm->lock); lockdep_assert_held(&vm->lock);
drm_gpuva_remove(&vma->gpuva); drm_gpuva_remove(&vma->gpuva);
...@@ -1428,7 +1428,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) ...@@ -1428,7 +1428,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
struct drm_gpuva *gpuva, *next; struct drm_gpuva *gpuva, *next;
u8 id; u8 id;
XE_WARN_ON(vm->preempt.num_exec_queues); xe_assert(xe, !vm->preempt.num_exec_queues);
xe_vm_close(vm); xe_vm_close(vm);
flush_async_ops(vm); flush_async_ops(vm);
...@@ -1505,7 +1505,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) ...@@ -1505,7 +1505,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
if (vm->async_ops.error_capture.addr) if (vm->async_ops.error_capture.addr)
wake_up_all(&vm->async_ops.error_capture.wq); wake_up_all(&vm->async_ops.error_capture.wq);
XE_WARN_ON(!list_empty(&vm->extobj.list)); xe_assert(xe, list_empty(&vm->extobj.list));
up_write(&vm->lock); up_write(&vm->lock);
mutex_lock(&xe->usm.lock); mutex_lock(&xe->usm.lock);
...@@ -1531,7 +1531,7 @@ static void vm_destroy_work_func(struct work_struct *w) ...@@ -1531,7 +1531,7 @@ static void vm_destroy_work_func(struct work_struct *w)
void *lookup; void *lookup;
/* xe_vm_close_and_put was not called? */ /* xe_vm_close_and_put was not called? */
XE_WARN_ON(vm->size); xe_assert(xe, !vm->size);
if (!(vm->flags & XE_VM_FLAG_MIGRATION)) { if (!(vm->flags & XE_VM_FLAG_MIGRATION)) {
xe_device_mem_access_put(xe); xe_device_mem_access_put(xe);
...@@ -1539,7 +1539,7 @@ static void vm_destroy_work_func(struct work_struct *w) ...@@ -1539,7 +1539,7 @@ static void vm_destroy_work_func(struct work_struct *w)
if (xe->info.has_asid) { if (xe->info.has_asid) {
mutex_lock(&xe->usm.lock); mutex_lock(&xe->usm.lock);
lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
XE_WARN_ON(lookup != vm); xe_assert(xe, lookup == vm);
mutex_unlock(&xe->usm.lock); mutex_unlock(&xe->usm.lock);
} }
} }
...@@ -1802,7 +1802,7 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence) ...@@ -1802,7 +1802,7 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence)
struct async_op_fence *afence = struct async_op_fence *afence =
container_of(fence, struct async_op_fence, fence); container_of(fence, struct async_op_fence, fence);
XE_WARN_ON(xe_vm_no_dma_fences(afence->vm)); xe_assert(afence->vm->xe, !xe_vm_no_dma_fences(afence->vm));
smp_rmb(); smp_rmb();
return wait_event_interruptible(afence->wq, afence->started); return wait_event_interruptible(afence->wq, afence->started);
...@@ -1828,7 +1828,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, ...@@ -1828,7 +1828,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
} else { } else {
int i; int i;
XE_WARN_ON(!xe_vm_in_fault_mode(vm)); xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
fence = dma_fence_get_stub(); fence = dma_fence_get_stub();
if (last_op) { if (last_op) {
...@@ -2110,7 +2110,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, ...@@ -2110,7 +2110,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
{ {
int err; int err;
XE_WARN_ON(region > ARRAY_SIZE(region_to_mem_type)); xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
if (!xe_vma_has_no_bo(vma)) { if (!xe_vma_has_no_bo(vma)) {
err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]); err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]);
...@@ -2309,7 +2309,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, ...@@ -2309,7 +2309,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
} }
break; break;
case XE_VM_BIND_OP_UNMAP_ALL: case XE_VM_BIND_OP_UNMAP_ALL:
XE_WARN_ON(!bo); xe_assert(vm->xe, bo);
err = xe_bo_lock(bo, true); err = xe_bo_lock(bo, true);
if (err) if (err)
...@@ -2506,7 +2506,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, ...@@ -2506,7 +2506,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
struct xe_vma_op *op = gpuva_op_to_vma_op(__op); struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
bool first = list_empty(ops_list); bool first = list_empty(ops_list);
XE_WARN_ON(!first && !async); xe_assert(vm->xe, first || async);
INIT_LIST_HEAD(&op->link); INIT_LIST_HEAD(&op->link);
list_add_tail(&op->link, ops_list); list_add_tail(&op->link, ops_list);
...@@ -3468,8 +3468,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) ...@@ -3468,8 +3468,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
u8 id; u8 id;
int ret; int ret;
XE_WARN_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma))); xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
XE_WARN_ON(xe_vma_is_null(vma)); xe_assert(xe, !xe_vma_is_null(vma));
trace_xe_vma_usm_invalidate(vma); trace_xe_vma_usm_invalidate(vma);
/* Check that we don't race with page-table updates */ /* Check that we don't race with page-table updates */
......
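The xe_vma_create() and xe_vm_find_overlapping_vma() hunks above keep every VMA range inside the VM: start < end, end < vm->size, and a lookup's start + range must not exceed vm->size. A runnable sketch of those range checks against made-up numbers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the VMA range invariants asserted above, checked
 * against a hypothetical VM size and mapping. */
int main(void)
{
	uint64_t vm_size = 1ull << 48;              /* hypothetical VM size */
	uint64_t start = 0x100000, end = 0x200000;  /* hypothetical VMA bounds */

	assert(start < end);                        /* non-empty, ordered range */
	assert(end < vm_size);                      /* VMA lies inside the VM */
	assert(start + (end - start) <= vm_size);   /* lookup range stays in bounds */

	printf("VMA [%#llx, %#llx) fits in a %#llx-byte VM\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)vm_size);
	return 0;
}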
...@@ -182,7 +182,7 @@ extern struct ttm_device_funcs xe_ttm_funcs; ...@@ -182,7 +182,7 @@ extern struct ttm_device_funcs xe_ttm_funcs;
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm) static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{ {
XE_WARN_ON(!xe_vm_in_compute_mode(vm)); xe_assert(vm->xe, xe_vm_in_compute_mode(vm));
queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work); queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
} }
......