Commit b8c1ba83 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe: Prevent flooding the kernel log with XE_IOCTL_ERR

Lower the log level of the XE_IOCTL_ERR macro to debug in order to prevent
flooding the kernel log.

v2: Rename XE_IOCTL_ERR to XE_IOCTL_DBG (Rodrigo Vivi)
v3: Rebase
v4: Fix style, remove unrelated change about __FILE__ and __LINE__

Link: https://lists.freedesktop.org/archives/intel-xe/2023-May/004704.html
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
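
Note: with this change the argument-check messages move off the default kernel
log and onto the DRM debug path. drm_dbg() output is compiled in but only
emitted when DRM driver debugging is enabled, for example by booting with
drm.debug=0x2 (the DRM_UT_DRIVER category) or writing that value to
/sys/module/drm/parameters/debug, so a buggy or hostile userspace hammering an
ioctl with bad arguments no longer floods dmesg by default.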
parent 5ce58303
@@ -1724,35 +1724,35 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
         u32 handle;
         int err;

-        if (XE_IOCTL_ERR(xe, args->extensions) || XE_IOCTL_ERR(xe, args->pad) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->flags &
+        if (XE_IOCTL_DBG(xe, args->flags &
                          ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
                            XE_GEM_CREATE_FLAG_SCANOUT |
                            xe->info.mem_region_mask)))
                 return -EINVAL;

         /* at least one memory type must be specified */
-        if (XE_IOCTL_ERR(xe, !(args->flags & xe->info.mem_region_mask)))
+        if (XE_IOCTL_DBG(xe, !(args->flags & xe->info.mem_region_mask)))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->handle))
+        if (XE_IOCTL_DBG(xe, args->handle))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, !args->size))
+        if (XE_IOCTL_DBG(xe, !args->size))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->size > SIZE_MAX))
+        if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->size & ~PAGE_MASK))
+        if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
                 return -EINVAL;

         if (args->vm_id) {
                 vm = xe_vm_lookup(xef, args->vm_id);
-                if (XE_IOCTL_ERR(xe, !vm))
+                if (XE_IOCTL_DBG(xe, !vm))
                         return -ENOENT;

                 err = xe_vm_lock(vm, &ww, 0, true);
                 if (err) {
@@ -1795,15 +1795,15 @@ int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
         struct drm_xe_gem_mmap_offset *args = data;
         struct drm_gem_object *gem_obj;

-        if (XE_IOCTL_ERR(xe, args->extensions) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->flags))
+        if (XE_IOCTL_DBG(xe, args->flags))
                 return -EINVAL;

         gem_obj = drm_gem_object_lookup(file, args->handle);
-        if (XE_IOCTL_ERR(xe, !gem_obj))
+        if (XE_IOCTL_DBG(xe, !gem_obj))
                 return -ENOENT;

         /* The mmap offset was set up at BO allocation time. */
(diff for one file collapsed, not shown)
@@ -184,22 +184,22 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         bool write_locked;
         int err = 0;

-        if (XE_IOCTL_ERR(xe, args->extensions) ||
-            XE_IOCTL_ERR(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) ||
+            XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;

         engine = xe_engine_lookup(xef, args->engine_id);
-        if (XE_IOCTL_ERR(xe, !engine))
+        if (XE_IOCTL_DBG(xe, !engine))
                 return -ENOENT;

-        if (XE_IOCTL_ERR(xe, engine->flags & ENGINE_FLAG_VM))
+        if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_VM))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, engine->width != args->num_batch_buffer))
+        if (XE_IOCTL_DBG(xe, engine->width != args->num_batch_buffer))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, engine->flags & ENGINE_FLAG_BANNED)) {
+        if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_BANNED)) {
                 err = -ECANCELED;
                 goto err_engine;
         }
@@ -12,8 +12,8 @@
 #define XE_WARN_ON WARN_ON
 #define XE_BUG_ON BUG_ON

-#define XE_IOCTL_ERR(xe, cond) \
-        ((cond) && (drm_info(&(xe)->drm, \
+#define XE_IOCTL_DBG(xe, cond) \
+        ((cond) && (drm_dbg(&(xe)->drm, \
                     "Ioctl argument check failed at %s:%d: %s", \
                     __FILE__, __LINE__, #cond), 1))
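
The macro keeps its comma-operator shape: (cond) && (log(...), 1) logs only
when the check trips, and the whole expression still evaluates to the truth
value of cond, so it can sit directly inside an if (). A minimal userspace
sketch of the same idiom, with fprintf standing in for drm_dbg and the
IOCTL_DBG name purely illustrative:

#include <stdio.h>

/*
 * Stand-in for the kernel's XE_IOCTL_DBG idiom: log only when the check
 * fails, and still evaluate to the truth value of cond so the macro can
 * be used as an if () condition. The (..., 1) comma expression makes the
 * logging arm non-zero regardless of what the logger returns.
 */
#define IOCTL_DBG(cond) \
        ((cond) && (fprintf(stderr, "Ioctl argument check failed at %s:%d: %s\n", \
                            __FILE__, __LINE__, #cond), 1))

int main(void)
{
        int pad = 1;

        if (IOCTL_DBG(pad != 0))        /* logs once, then takes the error branch */
                return 1;

        return 0;
}

Since XE_IOCTL_ERR and XE_IOCTL_DBG are the same length, the rename leaves the
continuation-line alignment of every multi-line call site untouched, which is
why the rest of the diff is a pure mechanical substitution.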
@@ -447,14 +447,14 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
         bool allowed;
         int ret = 0;

-        if (XE_IOCTL_ERR(xe, args->extensions) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->flags & ~VALID_MMIO_FLAGS))
+        if (XE_IOCTL_DBG(xe, args->flags & ~VALID_MMIO_FLAGS))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_MMIO_WRITE) && args->value))
+        if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_MMIO_WRITE) && args->value))
                 return -EINVAL;

         allowed = capable(CAP_SYS_ADMIN);
@@ -469,12 +469,12 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
                 }
         }

-        if (XE_IOCTL_ERR(xe, !allowed))
+        if (XE_IOCTL_DBG(xe, !allowed))
                 return -EPERM;

         bits_flag = args->flags & DRM_XE_MMIO_BITS_MASK;
         bytes = 1 << bits_flag;
-        if (XE_IOCTL_ERR(xe, args->addr + bytes > xe->mmio.size))
+        if (XE_IOCTL_DBG(xe, args->addr + bytes > xe->mmio.size))
                 return -EINVAL;

         /*
@@ -488,7 +488,7 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
         if (args->flags & DRM_XE_MMIO_WRITE) {
                 switch (bits_flag) {
                 case DRM_XE_MMIO_32BIT:
-                        if (XE_IOCTL_ERR(xe, args->value > U32_MAX)) {
+                        if (XE_IOCTL_DBG(xe, args->value > U32_MAX)) {
                                 ret = -EINVAL;
                                 goto exit;
                         }
@@ -60,12 +60,12 @@ static int query_engines(struct xe_device *xe,
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }

         hw_engine_info = kmalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !hw_engine_info))
+        if (XE_IOCTL_DBG(xe, !hw_engine_info))
                 return -ENOMEM;

         for_each_gt(gt, xe, gt_id)
@@ -114,12 +114,12 @@ static int query_memory_usage(struct xe_device *xe,
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }

         usage = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !usage))
+        if (XE_IOCTL_DBG(xe, !usage))
                 return -ENOMEM;

         man = ttm_manager_type(&xe->ttm, XE_PL_TT);
@@ -177,12 +177,12 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }

         config = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !config))
+        if (XE_IOCTL_DBG(xe, !config))
                 return -ENOMEM;

         config->num_params = num_params;
@@ -226,12 +226,12 @@ static int query_gts(struct xe_device *xe, struct drm_xe_device_query *query)
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }

         gts = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !gts))
+        if (XE_IOCTL_DBG(xe, !gts))
                 return -ENOMEM;

         gts->num_gt = xe->info.gt_count;
@@ -273,12 +273,12 @@ static int query_hwconfig(struct xe_device *xe,
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }

         hwconfig = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !hwconfig))
+        if (XE_IOCTL_DBG(xe, !hwconfig))
                 return -ENOMEM;

         xe_device_mem_access_get(xe);
@@ -332,7 +332,7 @@ static int query_gt_topology(struct xe_device *xe,
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }
@@ -380,15 +380,15 @@ int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         struct drm_xe_device_query *query = data;
         u32 idx;

-        if (XE_IOCTL_ERR(xe, query->extensions) ||
-            XE_IOCTL_ERR(xe, query->reserved[0] || query->reserved[1]))
+        if (XE_IOCTL_DBG(xe, query->extensions) ||
+            XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, query->query > ARRAY_SIZE(xe_query_funcs)))
+        if (XE_IOCTL_DBG(xe, query->query > ARRAY_SIZE(xe_query_funcs)))
                 return -EINVAL;

         idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
-        if (XE_IOCTL_ERR(xe, !xe_query_funcs[idx]))
+        if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
                 return -EINVAL;

         return xe_query_funcs[idx](xe, query);
@@ -110,44 +110,44 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
         if (copy_from_user(&sync_in, sync_user, sizeof(*sync_user)))
                 return -EFAULT;

-        if (XE_IOCTL_ERR(xe, sync_in.flags &
+        if (XE_IOCTL_DBG(xe, sync_in.flags &
                          ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_SIGNAL)) ||
-            XE_IOCTL_ERR(xe, sync_in.pad) ||
-            XE_IOCTL_ERR(xe, sync_in.reserved[0] || sync_in.reserved[1]))
+            XE_IOCTL_DBG(xe, sync_in.pad) ||
+            XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
                 return -EINVAL;

         signal = sync_in.flags & DRM_XE_SYNC_SIGNAL;
         switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
         case DRM_XE_SYNC_SYNCOBJ:
-                if (XE_IOCTL_ERR(xe, no_dma_fences && signal))
+                if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
                         return -EOPNOTSUPP;

-                if (XE_IOCTL_ERR(xe, upper_32_bits(sync_in.addr)))
+                if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
                         return -EINVAL;

                 sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
-                if (XE_IOCTL_ERR(xe, !sync->syncobj))
+                if (XE_IOCTL_DBG(xe, !sync->syncobj))
                         return -ENOENT;

                 if (!signal) {
                         sync->fence = drm_syncobj_fence_get(sync->syncobj);
-                        if (XE_IOCTL_ERR(xe, !sync->fence))
+                        if (XE_IOCTL_DBG(xe, !sync->fence))
                                 return -EINVAL;
                 }
                 break;

         case DRM_XE_SYNC_TIMELINE_SYNCOBJ:
-                if (XE_IOCTL_ERR(xe, no_dma_fences && signal))
+                if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
                         return -EOPNOTSUPP;

-                if (XE_IOCTL_ERR(xe, upper_32_bits(sync_in.addr)))
+                if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
                         return -EINVAL;

-                if (XE_IOCTL_ERR(xe, sync_in.timeline_value == 0))
+                if (XE_IOCTL_DBG(xe, sync_in.timeline_value == 0))
                         return -EINVAL;

                 sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
-                if (XE_IOCTL_ERR(xe, !sync->syncobj))
+                if (XE_IOCTL_DBG(xe, !sync->syncobj))
                         return -ENOENT;

                 if (signal) {
@@ -156,7 +156,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                                 return -ENOMEM;
                 } else {
                         sync->fence = drm_syncobj_fence_get(sync->syncobj);
-                        if (XE_IOCTL_ERR(xe, !sync->fence))
+                        if (XE_IOCTL_DBG(xe, !sync->fence))
                                 return -EINVAL;

                         err = dma_fence_chain_find_seqno(&sync->fence,
@@ -167,15 +167,15 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                 break;

         case DRM_XE_SYNC_DMA_BUF:
-                if (XE_IOCTL_ERR(xe, "TODO"))
+                if (XE_IOCTL_DBG(xe, "TODO"))
                         return -EINVAL;
                 break;

         case DRM_XE_SYNC_USER_FENCE:
-                if (XE_IOCTL_ERR(xe, !signal))
+                if (XE_IOCTL_DBG(xe, !signal))
                         return -EOPNOTSUPP;

-                if (XE_IOCTL_ERR(xe, sync_in.addr & 0x7))
+                if (XE_IOCTL_DBG(xe, sync_in.addr & 0x7))
                         return -EINVAL;

                 if (exec) {
@@ -183,7 +183,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
         } else {
                 sync->ufence = user_fence_create(xe, sync_in.addr,
                                                  sync_in.timeline_value);
-                if (XE_IOCTL_ERR(xe, !sync->ufence))
+                if (XE_IOCTL_DBG(xe, !sync->ufence))
                         return -ENOMEM;
         }
(diff for one file collapsed, not shown)
@@ -19,10 +19,10 @@ static int madvise_preferred_mem_class(struct xe_device *xe, struct xe_vm *vm,
 {
         int i, err;

-        if (XE_IOCTL_ERR(xe, value > XE_MEM_REGION_CLASS_VRAM))
+        if (XE_IOCTL_DBG(xe, value > XE_MEM_REGION_CLASS_VRAM))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, value == XE_MEM_REGION_CLASS_VRAM &&
+        if (XE_IOCTL_DBG(xe, value == XE_MEM_REGION_CLASS_VRAM &&
                          !xe->info.is_dgfx))
                 return -EINVAL;
@@ -48,7 +48,7 @@ static int madvise_preferred_gt(struct xe_device *xe, struct xe_vm *vm,
 {
         int i, err;

-        if (XE_IOCTL_ERR(xe, value > xe->info.tile_count))
+        if (XE_IOCTL_DBG(xe, value > xe->info.tile_count))
                 return -EINVAL;

         for (i = 0; i < num_vmas; ++i) {
@@ -77,14 +77,14 @@ static int madvise_preferred_mem_class_gt(struct xe_device *xe,
         u32 gt_id = upper_32_bits(value);
         u32 mem_class = lower_32_bits(value);

-        if (XE_IOCTL_ERR(xe, mem_class > XE_MEM_REGION_CLASS_VRAM))
+        if (XE_IOCTL_DBG(xe, mem_class > XE_MEM_REGION_CLASS_VRAM))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, mem_class == XE_MEM_REGION_CLASS_VRAM &&
+        if (XE_IOCTL_DBG(xe, mem_class == XE_MEM_REGION_CLASS_VRAM &&
                          !xe->info.is_dgfx))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, gt_id > xe->info.tile_count))
+        if (XE_IOCTL_DBG(xe, gt_id > xe->info.tile_count))
                 return -EINVAL;

         for (i = 0; i < num_vmas; ++i) {
@@ -115,7 +115,7 @@ static int madvise_cpu_atomic(struct xe_device *xe, struct xe_vm *vm,
                 struct ww_acquire_ctx ww;

                 bo = xe_vma_bo(vmas[i]);
-                if (XE_IOCTL_ERR(xe, !(bo->flags & XE_BO_CREATE_SYSTEM_BIT)))
+                if (XE_IOCTL_DBG(xe, !(bo->flags & XE_BO_CREATE_SYSTEM_BIT)))
                         return -EINVAL;

                 err = xe_bo_lock(bo, &ww, 0, true);
@@ -146,7 +146,7 @@ static int madvise_device_atomic(struct xe_device *xe, struct xe_vm *vm,
                 struct ww_acquire_ctx ww;

                 bo = xe_vma_bo(vmas[i]);
-                if (XE_IOCTL_ERR(xe, !(bo->flags & XE_BO_CREATE_VRAM0_BIT) &&
+                if (XE_IOCTL_DBG(xe, !(bo->flags & XE_BO_CREATE_VRAM0_BIT) &&
                                  !(bo->flags & XE_BO_CREATE_VRAM1_BIT)))
                         return -EINVAL;
@@ -165,10 +165,10 @@ static int madvise_priority(struct xe_device *xe, struct xe_vm *vm,
 {
         int i, err;

-        if (XE_IOCTL_ERR(xe, value > DRM_XE_VMA_PRIORITY_HIGH))
+        if (XE_IOCTL_DBG(xe, value > DRM_XE_VMA_PRIORITY_HIGH))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, value == DRM_XE_VMA_PRIORITY_HIGH &&
+        if (XE_IOCTL_DBG(xe, value == DRM_XE_VMA_PRIORITY_HIGH &&
                          !capable(CAP_SYS_NICE)))
                 return -EPERM;
@@ -255,40 +255,40 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
         struct xe_vma **vmas = NULL;
         int num_vmas = 0, err = 0, idx;

-        if (XE_IOCTL_ERR(xe, args->extensions) ||
-            XE_IOCTL_ERR(xe, args->pad || args->pad2) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) ||
+            XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->property > ARRAY_SIZE(madvise_funcs)))
+        if (XE_IOCTL_DBG(xe, args->property > ARRAY_SIZE(madvise_funcs)))
                 return -EINVAL;

         vm = xe_vm_lookup(xef, args->vm_id);
-        if (XE_IOCTL_ERR(xe, !vm))
+        if (XE_IOCTL_DBG(xe, !vm))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, !xe_vm_in_fault_mode(vm))) {
+        if (XE_IOCTL_DBG(xe, !xe_vm_in_fault_mode(vm))) {
                 err = -EINVAL;
                 goto put_vm;
         }

         down_read(&vm->lock);

-        if (XE_IOCTL_ERR(xe, xe_vm_is_closed_or_banned(vm))) {
+        if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
                 err = -ENOENT;
                 goto unlock_vm;
         }

         vmas = get_vmas(vm, &num_vmas, args->addr, args->range);
-        if (XE_IOCTL_ERR(xe, err))
+        if (XE_IOCTL_DBG(xe, err))
                 goto unlock_vm;

-        if (XE_IOCTL_ERR(xe, !vmas)) {
+        if (XE_IOCTL_DBG(xe, !vmas)) {
                 err = -ENOMEM;
                 goto unlock_vm;
         }

-        if (XE_IOCTL_ERR(xe, !num_vmas)) {
+        if (XE_IOCTL_DBG(xe, !num_vmas)) {
                 err = -EINVAL;
                 goto unlock_vm;
         }
@@ -117,51 +117,51 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
         unsigned long timeout;
         ktime_t start;

-        if (XE_IOCTL_ERR(xe, args->extensions) || XE_IOCTL_ERR(xe, args->pad) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->flags & ~VALID_FLAGS))
+        if (XE_IOCTL_DBG(xe, args->flags & ~VALID_FLAGS))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->op > MAX_OP))
+        if (XE_IOCTL_DBG(xe, args->op > MAX_OP))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, no_engines &&
+        if (XE_IOCTL_DBG(xe, no_engines &&
                          (args->num_engines || args->instances)))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, !no_engines && !args->num_engines))
+        if (XE_IOCTL_DBG(xe, !no_engines && !args->num_engines))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) &&
+        if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) &&
                          addr & 0x7))
                 return -EINVAL;

-        if (XE_IOCTL_ERR(xe, args->num_engines > XE_HW_ENGINE_MAX_INSTANCE))
+        if (XE_IOCTL_DBG(xe, args->num_engines > XE_HW_ENGINE_MAX_INSTANCE))
                 return -EINVAL;

         if (!no_engines) {
                 err = copy_from_user(eci, user_eci,
                                      sizeof(struct drm_xe_engine_class_instance) *
                                      args->num_engines);
-                if (XE_IOCTL_ERR(xe, err))
+                if (XE_IOCTL_DBG(xe, err))
                         return -EFAULT;

-                if (XE_IOCTL_ERR(xe, check_hw_engines(xe, eci,
+                if (XE_IOCTL_DBG(xe, check_hw_engines(xe, eci,
                                       args->num_engines)))
                         return -EINVAL;
         }

         if (args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) {
-                if (XE_IOCTL_ERR(xe, args->vm_id >> 32))
+                if (XE_IOCTL_DBG(xe, args->vm_id >> 32))
                         return -EINVAL;

                 vm = xe_vm_lookup(to_xe_file(file), args->vm_id);
-                if (XE_IOCTL_ERR(xe, !vm))
+                if (XE_IOCTL_DBG(xe, !vm))
                         return -ENOENT;

-                if (XE_IOCTL_ERR(xe, !vm->async_ops.error_capture.addr)) {
+                if (XE_IOCTL_DBG(xe, !vm->async_ops.error_capture.addr)) {
                         xe_vm_put(vm);
                         return -EOPNOTSUPP;
                 }
@@ -226,9 +226,9 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
                 args->timeout = 0;
         }

-        if (XE_IOCTL_ERR(xe, err < 0))
+        if (XE_IOCTL_DBG(xe, err < 0))
                 return err;
-        else if (XE_IOCTL_ERR(xe, !timeout))
+        else if (XE_IOCTL_DBG(xe, !timeout))
                 return -ETIME;

         return 0;