Commit 4bc9dd98 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe/uapi: Align on a common way to return arrays (memory regions)

The uAPI provides queries which return arrays of elements. As of now,
the struct format differs depending on which element is queried.
Fix this for memory regions by applying the pattern below:

    struct drm_xe_query_Xs {
       __u32 num_Xs;
       struct drm_xe_X Xs[];
       ...
    }

This removes "query" from the name of struct drm_xe_query_mem_region,
as that struct is not itself returned from the query IOCTL. There is
no functional change.
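
As an illustration of the pattern, below is a minimal userspace sketch of
how such a query is typically consumed. It assumes the
DRM_XE_DEVICE_QUERY_MEM_REGIONS query id and the usual two-call size
negotiation of DRM_IOCTL_XE_DEVICE_QUERY from the upstream xe uAPI; these
names are not part of this change and may differ at this point in the
series:

    /*
     * Hypothetical consumer, assuming the upstream xe uAPI names;
     * error handling kept minimal for brevity.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>   /* uAPI header; install path may vary */

    static int print_mem_regions(int fd)
    {
            struct drm_xe_device_query query = {
                    .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
            };
            struct drm_xe_query_mem_regions *mem_regions;
            __u32 i;

            /* First call with size == 0: kernel reports the required size. */
            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                    return -1;

            mem_regions = calloc(1, query.size);
            if (!mem_regions)
                    return -1;

            /* Second call: kernel fills num_mem_regions and mem_regions[]. */
            query.data = (uintptr_t)mem_regions;
            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
                    free(mem_regions);
                    return -1;
            }

            for (i = 0; i < mem_regions->num_mem_regions; i++)
                    printf("region %u: class %u, %llu bytes\n",
                           (unsigned int)mem_regions->mem_regions[i].instance,
                           (unsigned int)mem_regions->mem_regions[i].mem_class,
                           (unsigned long long)mem_regions->mem_regions[i].total_size);

            free(mem_regions);
            return 0;
    }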

v2: Only rename drm_xe_query_mem_region to drm_xe_mem_region
    (José Roberto de Souza)

v3: Rename usage to mem_regions in xe_query.c (José Roberto de Souza)
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 4e03b584
@@ -240,14 +240,14 @@ static size_t calc_mem_regions_size(struct xe_device *xe)
 		if (ttm_manager_type(&xe->ttm, i))
 			num_managers++;
 
-	return offsetof(struct drm_xe_query_mem_regions, regions[num_managers]);
+	return offsetof(struct drm_xe_query_mem_regions, mem_regions[num_managers]);
 }
 
 static int query_mem_regions(struct xe_device *xe,
			     struct drm_xe_device_query *query)
 {
 	size_t size = calc_mem_regions_size(xe);
-	struct drm_xe_query_mem_regions *usage;
+	struct drm_xe_query_mem_regions *mem_regions;
 	struct drm_xe_query_mem_regions __user *query_ptr =
 		u64_to_user_ptr(query->data);
 	struct ttm_resource_manager *man;
@@ -260,50 +260,52 @@ static int query_mem_regions(struct xe_device *xe,
 		return -EINVAL;
 	}
 
-	usage = kzalloc(size, GFP_KERNEL);
-	if (XE_IOCTL_DBG(xe, !usage))
+	mem_regions = kzalloc(size, GFP_KERNEL);
+	if (XE_IOCTL_DBG(xe, !mem_regions))
 		return -ENOMEM;
 
 	man = ttm_manager_type(&xe->ttm, XE_PL_TT);
-	usage->regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
-	usage->regions[0].instance = 0;
-	usage->regions[0].min_page_size = PAGE_SIZE;
-	usage->regions[0].total_size = man->size << PAGE_SHIFT;
+	mem_regions->mem_regions[0].mem_class = DRM_XE_MEM_REGION_CLASS_SYSMEM;
+	mem_regions->mem_regions[0].instance = 0;
+	mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
+	mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
 	if (perfmon_capable())
-		usage->regions[0].used = ttm_resource_manager_usage(man);
-	usage->num_regions = 1;
+		mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
+	mem_regions->num_mem_regions = 1;
 
 	for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
 		man = ttm_manager_type(&xe->ttm, i);
 		if (man) {
-			usage->regions[usage->num_regions].mem_class =
+			mem_regions->mem_regions[mem_regions->num_mem_regions].mem_class =
 				DRM_XE_MEM_REGION_CLASS_VRAM;
-			usage->regions[usage->num_regions].instance =
-				usage->num_regions;
-			usage->regions[usage->num_regions].min_page_size =
+			mem_regions->mem_regions[mem_regions->num_mem_regions].instance =
+				mem_regions->num_mem_regions;
+			mem_regions->mem_regions[mem_regions->num_mem_regions].min_page_size =
 				xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
 				SZ_64K : PAGE_SIZE;
-			usage->regions[usage->num_regions].total_size =
+			mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
 				man->size;
 
 			if (perfmon_capable()) {
 				xe_ttm_vram_get_used(man,
-					&usage->regions[usage->num_regions].used,
-					&usage->regions[usage->num_regions].cpu_visible_used);
+						     &mem_regions->mem_regions
+						     [mem_regions->num_mem_regions].used,
+						     &mem_regions->mem_regions
+						     [mem_regions->num_mem_regions].cpu_visible_used);
 			}
 
-			usage->regions[usage->num_regions].cpu_visible_size =
+			mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
 				xe_ttm_vram_get_cpu_visible_size(man);
-			usage->num_regions++;
+			mem_regions->num_mem_regions++;
 		}
 	}
 
-	if (!copy_to_user(query_ptr, usage, size))
+	if (!copy_to_user(query_ptr, mem_regions, size))
 		ret = 0;
 	else
 		ret = -ENOSPC;
-	kfree(usage);
+	kfree(mem_regions);
 
 	return ret;
 }
...
@@ -183,10 +183,10 @@ enum drm_xe_memory_class {
 };
 
 /**
- * struct drm_xe_query_mem_region - Describes some region as known to
+ * struct drm_xe_mem_region - Describes some region as known to
  * the driver.
  */
-struct drm_xe_query_mem_region {
+struct drm_xe_mem_region {
 	/**
 	 * @mem_class: The memory class describing this region.
 	 *
@@ -323,12 +323,12 @@ struct drm_xe_query_engine_cycles {
  * struct drm_xe_query_mem_regions in .data.
  */
 struct drm_xe_query_mem_regions {
-	/** @num_regions: number of memory regions returned in @regions */
-	__u32 num_regions;
+	/** @num_mem_regions: number of memory regions returned in @mem_regions */
+	__u32 num_mem_regions;
 	/** @pad: MBZ */
 	__u32 pad;
-	/** @regions: The returned regions for this device */
-	struct drm_xe_query_mem_region regions[];
+	/** @mem_regions: The returned memory regions for this device */
+	struct drm_xe_mem_region mem_regions[];
 };
 
 /**
...