Commit b8c1ba83 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe: Prevent flooding the kernel log with XE_IOCTL_ERR

Lower the log level of the XE_IOCTL_ERR macro to debug in order to
prevent flooding the kernel log.

v2: Rename XE_IOCTL_ERR to XE_IOCTL_DBG (Rodrigo Vivi)
v3: Rebase
v4: Fix style, remove unrelated change about __FILE__ and __LINE__

Link: https://lists.freedesktop.org/archives/intel-xe/2023-May/004704.html
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 5ce58303
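With drm_dbg(), the messages are gated on the DRM_UT_DRIVER debug category rather than printed unconditionally, so the argument checks stay silent until driver debugging is turned on. On a typical mainline kernel that can be done at runtime, for example (standard paths, not part of this patch):

    echo 0x2 > /sys/module/drm/parameters/debug
    # or, with CONFIG_DYNAMIC_DEBUG, for the xe module only:
    echo 'module xe +p' > /sys/kernel/debug/dynamic_debug/control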
@@ -1724,35 +1724,35 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
         u32 handle;
         int err;
-        if (XE_IOCTL_ERR(xe, args->extensions) || XE_IOCTL_ERR(xe, args->pad) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->flags &
-                         ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
-                           XE_GEM_CREATE_FLAG_SCANOUT |
-                           xe->info.mem_region_mask)))
+        if (XE_IOCTL_DBG(xe, args->flags &
+                         ~(XE_GEM_CREATE_FLAG_DEFER_BACKING |
+                           XE_GEM_CREATE_FLAG_SCANOUT |
+                           xe->info.mem_region_mask)))
                 return -EINVAL;
         /* at least one memory type must be specified */
-        if (XE_IOCTL_ERR(xe, !(args->flags & xe->info.mem_region_mask)))
+        if (XE_IOCTL_DBG(xe, !(args->flags & xe->info.mem_region_mask)))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->handle))
+        if (XE_IOCTL_DBG(xe, args->handle))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, !args->size))
+        if (XE_IOCTL_DBG(xe, !args->size))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->size > SIZE_MAX))
+        if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->size & ~PAGE_MASK))
+        if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
                 return -EINVAL;
         if (args->vm_id) {
                 vm = xe_vm_lookup(xef, args->vm_id);
-                if (XE_IOCTL_ERR(xe, !vm))
+                if (XE_IOCTL_DBG(xe, !vm))
                         return -ENOENT;
                 err = xe_vm_lock(vm, &ww, 0, true);
                 if (err) {
@@ -1795,15 +1795,15 @@ int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
         struct drm_xe_gem_mmap_offset *args = data;
         struct drm_gem_object *gem_obj;
-        if (XE_IOCTL_ERR(xe, args->extensions) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->flags))
+        if (XE_IOCTL_DBG(xe, args->flags))
                 return -EINVAL;
         gem_obj = drm_gem_object_lookup(file, args->handle);
-        if (XE_IOCTL_ERR(xe, !gem_obj))
+        if (XE_IOCTL_DBG(xe, !gem_obj))
                 return -ENOENT;
         /* The mmap offset was set up at BO allocation time. */
...
This diff is collapsed.
@@ -184,22 +184,22 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         bool write_locked;
         int err = 0;
-        if (XE_IOCTL_ERR(xe, args->extensions) ||
-            XE_IOCTL_ERR(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) ||
+            XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;
         engine = xe_engine_lookup(xef, args->engine_id);
-        if (XE_IOCTL_ERR(xe, !engine))
+        if (XE_IOCTL_DBG(xe, !engine))
                 return -ENOENT;
-        if (XE_IOCTL_ERR(xe, engine->flags & ENGINE_FLAG_VM))
+        if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_VM))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, engine->width != args->num_batch_buffer))
+        if (XE_IOCTL_DBG(xe, engine->width != args->num_batch_buffer))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, engine->flags & ENGINE_FLAG_BANNED)) {
+        if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_BANNED)) {
                 err = -ECANCELED;
                 goto err_engine;
         }
...
@@ -12,8 +12,8 @@
 #define XE_WARN_ON WARN_ON
 #define XE_BUG_ON BUG_ON
-#define XE_IOCTL_ERR(xe, cond) \
-        ((cond) && (drm_info(&(xe)->drm, \
+#define XE_IOCTL_DBG(xe, cond) \
+        ((cond) && (drm_dbg(&(xe)->drm, \
                     "Ioctl argument check failed at %s:%d: %s", \
                     __FILE__, __LINE__, #cond), 1))
...
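The macro keeps its comma-operator shape: the drm_dbg() call is evaluated purely for its logging side effect, the trailing 1 keeps the && arm truthy, and the whole expression still yields the condition, so call sites can use it directly inside if (). A minimal userspace sketch of the same pattern, with fprintf() standing in for drm_dbg() (the names here are illustrative, not from the patch):

    #include <stdio.h>

    /* Same shape as XE_IOCTL_DBG: log the failed check as a side effect.
     * The comma operator discards fprintf()'s return value and yields 1,
     * so the && expression as a whole evaluates to the tested condition.
     */
    #define IOCTL_DBG(cond) \
            ((cond) && (fprintf(stderr, \
                        "Ioctl argument check failed at %s:%d: %s\n", \
                        __FILE__, __LINE__, #cond), 1))

    int main(void)
    {
            int pad = 1;

            if (IOCTL_DBG(pad))     /* logs once, then takes the error path */
                    return 22;      /* EINVAL */
            return 0;
    }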
@@ -447,14 +447,14 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
         bool allowed;
         int ret = 0;
-        if (XE_IOCTL_ERR(xe, args->extensions) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->flags & ~VALID_MMIO_FLAGS))
+        if (XE_IOCTL_DBG(xe, args->flags & ~VALID_MMIO_FLAGS))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_MMIO_WRITE) && args->value))
+        if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_MMIO_WRITE) && args->value))
                 return -EINVAL;
         allowed = capable(CAP_SYS_ADMIN);
@@ -469,12 +469,12 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
                 }
         }
-        if (XE_IOCTL_ERR(xe, !allowed))
+        if (XE_IOCTL_DBG(xe, !allowed))
                 return -EPERM;
         bits_flag = args->flags & DRM_XE_MMIO_BITS_MASK;
         bytes = 1 << bits_flag;
-        if (XE_IOCTL_ERR(xe, args->addr + bytes > xe->mmio.size))
+        if (XE_IOCTL_DBG(xe, args->addr + bytes > xe->mmio.size))
                 return -EINVAL;
         /*
@@ -488,7 +488,7 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
         if (args->flags & DRM_XE_MMIO_WRITE) {
                 switch (bits_flag) {
                 case DRM_XE_MMIO_32BIT:
-                        if (XE_IOCTL_ERR(xe, args->value > U32_MAX)) {
+                        if (XE_IOCTL_DBG(xe, args->value > U32_MAX)) {
                                 ret = -EINVAL;
                                 goto exit;
                         }
...
@@ -60,12 +60,12 @@ static int query_engines(struct xe_device *xe,
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }
         hw_engine_info = kmalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !hw_engine_info))
+        if (XE_IOCTL_DBG(xe, !hw_engine_info))
                 return -ENOMEM;
         for_each_gt(gt, xe, gt_id)
@@ -114,12 +114,12 @@ static int query_memory_usage(struct xe_device *xe,
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }
         usage = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !usage))
+        if (XE_IOCTL_DBG(xe, !usage))
                 return -ENOMEM;
         man = ttm_manager_type(&xe->ttm, XE_PL_TT);
@@ -177,12 +177,12 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }
         config = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !config))
+        if (XE_IOCTL_DBG(xe, !config))
                 return -ENOMEM;
         config->num_params = num_params;
@@ -226,12 +226,12 @@ static int query_gts(struct xe_device *xe, struct drm_xe_device_query *query)
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }
         gts = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !gts))
+        if (XE_IOCTL_DBG(xe, !gts))
                 return -ENOMEM;
         gts->num_gt = xe->info.gt_count;
@@ -273,12 +273,12 @@ static int query_hwconfig(struct xe_device *xe,
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }
         hwconfig = kzalloc(size, GFP_KERNEL);
-        if (XE_IOCTL_ERR(xe, !hwconfig))
+        if (XE_IOCTL_DBG(xe, !hwconfig))
                 return -ENOMEM;
         xe_device_mem_access_get(xe);
@@ -332,7 +332,7 @@ static int query_gt_topology(struct xe_device *xe,
         if (query->size == 0) {
                 query->size = size;
                 return 0;
-        } else if (XE_IOCTL_ERR(xe, query->size != size)) {
+        } else if (XE_IOCTL_DBG(xe, query->size != size)) {
                 return -EINVAL;
         }
@@ -380,15 +380,15 @@ int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
         struct drm_xe_device_query *query = data;
         u32 idx;
-        if (XE_IOCTL_ERR(xe, query->extensions) ||
-            XE_IOCTL_ERR(xe, query->reserved[0] || query->reserved[1]))
+        if (XE_IOCTL_DBG(xe, query->extensions) ||
+            XE_IOCTL_DBG(xe, query->reserved[0] || query->reserved[1]))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, query->query > ARRAY_SIZE(xe_query_funcs)))
+        if (XE_IOCTL_DBG(xe, query->query > ARRAY_SIZE(xe_query_funcs)))
                 return -EINVAL;
         idx = array_index_nospec(query->query, ARRAY_SIZE(xe_query_funcs));
-        if (XE_IOCTL_ERR(xe, !xe_query_funcs[idx]))
+        if (XE_IOCTL_DBG(xe, !xe_query_funcs[idx]))
                 return -EINVAL;
         return xe_query_funcs[idx](xe, query);
...
@@ -110,44 +110,44 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
         if (copy_from_user(&sync_in, sync_user, sizeof(*sync_user)))
                 return -EFAULT;
-        if (XE_IOCTL_ERR(xe, sync_in.flags &
-                         ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_SIGNAL)) ||
-            XE_IOCTL_ERR(xe, sync_in.pad) ||
-            XE_IOCTL_ERR(xe, sync_in.reserved[0] || sync_in.reserved[1]))
+        if (XE_IOCTL_DBG(xe, sync_in.flags &
+                         ~(SYNC_FLAGS_TYPE_MASK | DRM_XE_SYNC_SIGNAL)) ||
+            XE_IOCTL_DBG(xe, sync_in.pad) ||
+            XE_IOCTL_DBG(xe, sync_in.reserved[0] || sync_in.reserved[1]))
                 return -EINVAL;
         signal = sync_in.flags & DRM_XE_SYNC_SIGNAL;
         switch (sync_in.flags & SYNC_FLAGS_TYPE_MASK) {
         case DRM_XE_SYNC_SYNCOBJ:
-                if (XE_IOCTL_ERR(xe, no_dma_fences && signal))
+                if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
                         return -EOPNOTSUPP;
-                if (XE_IOCTL_ERR(xe, upper_32_bits(sync_in.addr)))
+                if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
                         return -EINVAL;
                 sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
-                if (XE_IOCTL_ERR(xe, !sync->syncobj))
+                if (XE_IOCTL_DBG(xe, !sync->syncobj))
                         return -ENOENT;
                 if (!signal) {
                         sync->fence = drm_syncobj_fence_get(sync->syncobj);
-                        if (XE_IOCTL_ERR(xe, !sync->fence))
+                        if (XE_IOCTL_DBG(xe, !sync->fence))
                                 return -EINVAL;
                 }
                 break;
         case DRM_XE_SYNC_TIMELINE_SYNCOBJ:
-                if (XE_IOCTL_ERR(xe, no_dma_fences && signal))
+                if (XE_IOCTL_DBG(xe, no_dma_fences && signal))
                         return -EOPNOTSUPP;
-                if (XE_IOCTL_ERR(xe, upper_32_bits(sync_in.addr)))
+                if (XE_IOCTL_DBG(xe, upper_32_bits(sync_in.addr)))
                         return -EINVAL;
-                if (XE_IOCTL_ERR(xe, sync_in.timeline_value == 0))
+                if (XE_IOCTL_DBG(xe, sync_in.timeline_value == 0))
                         return -EINVAL;
                 sync->syncobj = drm_syncobj_find(xef->drm, sync_in.handle);
-                if (XE_IOCTL_ERR(xe, !sync->syncobj))
+                if (XE_IOCTL_DBG(xe, !sync->syncobj))
                         return -ENOENT;
                 if (signal) {
@@ -156,7 +156,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                                 return -ENOMEM;
                 } else {
                         sync->fence = drm_syncobj_fence_get(sync->syncobj);
-                        if (XE_IOCTL_ERR(xe, !sync->fence))
+                        if (XE_IOCTL_DBG(xe, !sync->fence))
                                 return -EINVAL;
                         err = dma_fence_chain_find_seqno(&sync->fence,
@@ -167,15 +167,15 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                 break;
         case DRM_XE_SYNC_DMA_BUF:
-                if (XE_IOCTL_ERR(xe, "TODO"))
+                if (XE_IOCTL_DBG(xe, "TODO"))
                         return -EINVAL;
                 break;
         case DRM_XE_SYNC_USER_FENCE:
-                if (XE_IOCTL_ERR(xe, !signal))
+                if (XE_IOCTL_DBG(xe, !signal))
                         return -EOPNOTSUPP;
-                if (XE_IOCTL_ERR(xe, sync_in.addr & 0x7))
+                if (XE_IOCTL_DBG(xe, sync_in.addr & 0x7))
                         return -EINVAL;
                 if (exec) {
@@ -183,7 +183,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
                 } else {
                         sync->ufence = user_fence_create(xe, sync_in.addr,
                                                          sync_in.timeline_value);
-                        if (XE_IOCTL_ERR(xe, !sync->ufence))
+                        if (XE_IOCTL_DBG(xe, !sync->ufence))
                                 return -ENOMEM;
                 }
...
This diff is collapsed.
@@ -19,10 +19,10 @@ static int madvise_preferred_mem_class(struct xe_device *xe, struct xe_vm *vm,
 {
         int i, err;
-        if (XE_IOCTL_ERR(xe, value > XE_MEM_REGION_CLASS_VRAM))
+        if (XE_IOCTL_DBG(xe, value > XE_MEM_REGION_CLASS_VRAM))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, value == XE_MEM_REGION_CLASS_VRAM &&
-                         !xe->info.is_dgfx))
+        if (XE_IOCTL_DBG(xe, value == XE_MEM_REGION_CLASS_VRAM &&
+                         !xe->info.is_dgfx))
                 return -EINVAL;
@@ -48,7 +48,7 @@ static int madvise_preferred_gt(struct xe_device *xe, struct xe_vm *vm,
 {
         int i, err;
-        if (XE_IOCTL_ERR(xe, value > xe->info.tile_count))
+        if (XE_IOCTL_DBG(xe, value > xe->info.tile_count))
                 return -EINVAL;
         for (i = 0; i < num_vmas; ++i) {
@@ -77,14 +77,14 @@ static int madvise_preferred_mem_class_gt(struct xe_device *xe,
         u32 gt_id = upper_32_bits(value);
         u32 mem_class = lower_32_bits(value);
-        if (XE_IOCTL_ERR(xe, mem_class > XE_MEM_REGION_CLASS_VRAM))
+        if (XE_IOCTL_DBG(xe, mem_class > XE_MEM_REGION_CLASS_VRAM))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, mem_class == XE_MEM_REGION_CLASS_VRAM &&
-                         !xe->info.is_dgfx))
+        if (XE_IOCTL_DBG(xe, mem_class == XE_MEM_REGION_CLASS_VRAM &&
+                         !xe->info.is_dgfx))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, gt_id > xe->info.tile_count))
+        if (XE_IOCTL_DBG(xe, gt_id > xe->info.tile_count))
                 return -EINVAL;
         for (i = 0; i < num_vmas; ++i) {
@@ -115,7 +115,7 @@ static int madvise_cpu_atomic(struct xe_device *xe, struct xe_vm *vm,
                 struct ww_acquire_ctx ww;
                 bo = xe_vma_bo(vmas[i]);
-                if (XE_IOCTL_ERR(xe, !(bo->flags & XE_BO_CREATE_SYSTEM_BIT)))
+                if (XE_IOCTL_DBG(xe, !(bo->flags & XE_BO_CREATE_SYSTEM_BIT)))
                         return -EINVAL;
                 err = xe_bo_lock(bo, &ww, 0, true);
@@ -146,7 +146,7 @@ static int madvise_device_atomic(struct xe_device *xe, struct xe_vm *vm,
                 struct ww_acquire_ctx ww;
                 bo = xe_vma_bo(vmas[i]);
-                if (XE_IOCTL_ERR(xe, !(bo->flags & XE_BO_CREATE_VRAM0_BIT) &&
-                                 !(bo->flags & XE_BO_CREATE_VRAM1_BIT)))
+                if (XE_IOCTL_DBG(xe, !(bo->flags & XE_BO_CREATE_VRAM0_BIT) &&
+                                 !(bo->flags & XE_BO_CREATE_VRAM1_BIT)))
                         return -EINVAL;
@@ -165,10 +165,10 @@ static int madvise_priority(struct xe_device *xe, struct xe_vm *vm,
 {
         int i, err;
-        if (XE_IOCTL_ERR(xe, value > DRM_XE_VMA_PRIORITY_HIGH))
+        if (XE_IOCTL_DBG(xe, value > DRM_XE_VMA_PRIORITY_HIGH))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, value == DRM_XE_VMA_PRIORITY_HIGH &&
-                         !capable(CAP_SYS_NICE)))
+        if (XE_IOCTL_DBG(xe, value == DRM_XE_VMA_PRIORITY_HIGH &&
+                         !capable(CAP_SYS_NICE)))
                 return -EPERM;
@@ -255,40 +255,40 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
         struct xe_vma **vmas = NULL;
         int num_vmas = 0, err = 0, idx;
-        if (XE_IOCTL_ERR(xe, args->extensions) ||
-            XE_IOCTL_ERR(xe, args->pad || args->pad2) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) ||
+            XE_IOCTL_DBG(xe, args->pad || args->pad2) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->property > ARRAY_SIZE(madvise_funcs)))
+        if (XE_IOCTL_DBG(xe, args->property > ARRAY_SIZE(madvise_funcs)))
                 return -EINVAL;
         vm = xe_vm_lookup(xef, args->vm_id);
-        if (XE_IOCTL_ERR(xe, !vm))
+        if (XE_IOCTL_DBG(xe, !vm))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, !xe_vm_in_fault_mode(vm))) {
+        if (XE_IOCTL_DBG(xe, !xe_vm_in_fault_mode(vm))) {
                 err = -EINVAL;
                 goto put_vm;
         }
         down_read(&vm->lock);
-        if (XE_IOCTL_ERR(xe, xe_vm_is_closed_or_banned(vm))) {
+        if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
                 err = -ENOENT;
                 goto unlock_vm;
         }
         vmas = get_vmas(vm, &num_vmas, args->addr, args->range);
-        if (XE_IOCTL_ERR(xe, err))
+        if (XE_IOCTL_DBG(xe, err))
                 goto unlock_vm;
-        if (XE_IOCTL_ERR(xe, !vmas)) {
+        if (XE_IOCTL_DBG(xe, !vmas)) {
                 err = -ENOMEM;
                 goto unlock_vm;
         }
-        if (XE_IOCTL_ERR(xe, !num_vmas)) {
+        if (XE_IOCTL_DBG(xe, !num_vmas)) {
                 err = -EINVAL;
                 goto unlock_vm;
         }
...
@@ -117,51 +117,51 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
         unsigned long timeout;
         ktime_t start;
-        if (XE_IOCTL_ERR(xe, args->extensions) || XE_IOCTL_ERR(xe, args->pad) ||
-            XE_IOCTL_ERR(xe, args->reserved[0] || args->reserved[1]))
+        if (XE_IOCTL_DBG(xe, args->extensions) || XE_IOCTL_DBG(xe, args->pad) ||
+            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->flags & ~VALID_FLAGS))
+        if (XE_IOCTL_DBG(xe, args->flags & ~VALID_FLAGS))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->op > MAX_OP))
+        if (XE_IOCTL_DBG(xe, args->op > MAX_OP))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, no_engines &&
-                         (args->num_engines || args->instances)))
+        if (XE_IOCTL_DBG(xe, no_engines &&
+                         (args->num_engines || args->instances)))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, !no_engines && !args->num_engines))
+        if (XE_IOCTL_DBG(xe, !no_engines && !args->num_engines))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, !(args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) &&
-                         addr & 0x7))
+        if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) &&
+                         addr & 0x7))
                 return -EINVAL;
-        if (XE_IOCTL_ERR(xe, args->num_engines > XE_HW_ENGINE_MAX_INSTANCE))
+        if (XE_IOCTL_DBG(xe, args->num_engines > XE_HW_ENGINE_MAX_INSTANCE))
                 return -EINVAL;
         if (!no_engines) {
                 err = copy_from_user(eci, user_eci,
                                      sizeof(struct drm_xe_engine_class_instance) *
                                      args->num_engines);
-                if (XE_IOCTL_ERR(xe, err))
+                if (XE_IOCTL_DBG(xe, err))
                         return -EFAULT;
-                if (XE_IOCTL_ERR(xe, check_hw_engines(xe, eci,
-                                                      args->num_engines)))
+                if (XE_IOCTL_DBG(xe, check_hw_engines(xe, eci,
+                                                      args->num_engines)))
                         return -EINVAL;
         }
         if (args->flags & DRM_XE_UFENCE_WAIT_VM_ERROR) {
-                if (XE_IOCTL_ERR(xe, args->vm_id >> 32))
+                if (XE_IOCTL_DBG(xe, args->vm_id >> 32))
                         return -EINVAL;
                 vm = xe_vm_lookup(to_xe_file(file), args->vm_id);
-                if (XE_IOCTL_ERR(xe, !vm))
+                if (XE_IOCTL_DBG(xe, !vm))
                         return -ENOENT;
-                if (XE_IOCTL_ERR(xe, !vm->async_ops.error_capture.addr)) {
+                if (XE_IOCTL_DBG(xe, !vm->async_ops.error_capture.addr)) {
                         xe_vm_put(vm);
                         return -EOPNOTSUPP;
                 }
@@ -226,9 +226,9 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
                 args->timeout = 0;
         }
-        if (XE_IOCTL_ERR(xe, err < 0))
+        if (XE_IOCTL_DBG(xe, err < 0))
                 return err;
-        else if (XE_IOCTL_ERR(xe, !timeout))
+        else if (XE_IOCTL_DBG(xe, !timeout))
                 return -ETIME;
         return 0;
...