Commit 4e03b584 authored by Mauro Carvalho Chehab, committed by Rodrigo Vivi

drm/xe/uapi: Reject bo creation of unaligned size

For xe bo creation, require that the passed size matches the system
or vram minimum page alignment. This ensures userspace is aware of
the region constraints; unaligned allocations are rejected with
-EINVAL.
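
For illustration only (not part of the patch): a minimal userspace-side
sketch, in C, of satisfying the new constraint by rounding the requested
size up to the region's min_page_size before the GEM create ioctl.
xe_query_min_page_size() and xe_gem_create() are hypothetical wrappers
around the memory-region query and DRM_IOCTL_XE_GEM_CREATE, not real
libdrm API.

#include <errno.h>
#include <stdint.h>

/* Hypothetical wrappers, assumed to be provided elsewhere. */
uint64_t xe_query_min_page_size(int fd, uint32_t region);
int xe_gem_create(int fd, uint32_t region, uint64_t size, uint32_t *handle);

/* Round x up to the next multiple of a (a must be a power of two). */
static inline uint64_t align_u64(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int create_bo_for_region(int fd, uint32_t region, uint64_t size, uint32_t *handle)
{
	/* min_page_size as reported by drm_xe_query_mem_region for this region */
	uint64_t min_page = xe_query_min_page_size(fd, region);

	if (!min_page)
		return -EINVAL;

	/* Unaligned sizes are now rejected with -EINVAL, so round up first. */
	return xe_gem_create(fd, region, align_u64(size, min_page), handle);
}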

v2:
- Rebase, Update uAPI documentation. (Thomas)
v3:
- Adjust the dma-buf kunit test accordingly. (Thomas)
v4:
- Fixed rebase conflicts and updated commit message. (Francois)
Signed-off-by: Mauro Carvalho Chehab <mauro.chehab@linux.intel.com>
Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 2bec3071
@@ -109,15 +109,21 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
struct drm_gem_object *import;
struct dma_buf *dmabuf;
struct xe_bo *bo;
size_t size;
/* No VRAM on this device? */
if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
return;
size = PAGE_SIZE;
if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
size = SZ_64K;
kunit_info(test, "running %s\n", __func__);
bo = xe_bo_create_user(xe, NULL, NULL, PAGE_SIZE, DRM_XE_GEM_CPU_CACHING_WC,
ttm_bo_type_device, params->mem_mask);
bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
ttm_bo_type_device, XE_BO_CREATE_USER_BIT | params->mem_mask);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
......
@@ -1222,6 +1222,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
};
struct ttm_placement *placement;
uint32_t alignment;
size_t aligned_size;
int err;
/* Only kernel objects should set GT */
@@ -1232,23 +1233,30 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return ERR_PTR(-EINVAL);
}
if (!bo) {
bo = xe_bo_alloc();
if (IS_ERR(bo))
return bo;
}
if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
!(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) {
size = ALIGN(size, SZ_64K);
aligned_size = ALIGN(size, SZ_64K);
if (type != ttm_bo_type_device)
size = ALIGN(size, SZ_64K);
flags |= XE_BO_INTERNAL_64K;
alignment = SZ_64K >> PAGE_SHIFT;
} else {
size = ALIGN(size, PAGE_SIZE);
aligned_size = ALIGN(size, SZ_4K);
flags &= ~XE_BO_INTERNAL_64K;
alignment = SZ_4K >> PAGE_SHIFT;
}
if (type == ttm_bo_type_device && aligned_size != size)
return ERR_PTR(-EINVAL);
if (!bo) {
bo = xe_bo_alloc();
if (IS_ERR(bo))
return bo;
}
bo->tile = tile;
bo->size = size;
bo->flags = flags;
@@ -1566,7 +1574,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags)
{
struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, size, flags);
struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
if (IS_ERR(bo))
return bo;
......
@@ -207,11 +207,13 @@ struct drm_xe_query_mem_region {
*
* When the kernel allocates memory for this region, the
* underlying pages will be at least @min_page_size in size.
*
* Important note: When userspace allocates a GTT address which
* can point to memory allocated from this region, it must also
* respect this minimum alignment. This is enforced by the
* kernel.
* Buffer objects with an allowable placement in this region must be
* created with a size aligned to this value.
* GPU virtual address mappings of (parts of) buffer objects that
* may be placed in this region must also have their GPU virtual
* address and range aligned to this value.
* Affected IOCTLS will return %-EINVAL if alignment restrictions are
* not met.
*/
__u32 min_page_size;
/**
@@ -517,9 +519,8 @@ struct drm_xe_gem_create {
__u64 extensions;
/**
* @size: Requested size for the object
*
* The (page-aligned) allocated size for the object will be returned.
* @size: Size of the object to be created, must match region
* (system or vram) minimum alignment (&min_page_size).
*/
__u64 size;
......
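
The updated @min_page_size documentation above also applies to GPU
virtual address mappings: their address and range must be aligned the
same way. For illustration only (not part of the patch), a sketch of
that check; xe_vm_bind_range() is a hypothetical wrapper around
DRM_IOCTL_XE_VM_BIND, not real libdrm API.

#include <errno.h>
#include <stdint.h>

/* Hypothetical wrapper, assumed to be provided elsewhere. */
int xe_vm_bind_range(int fd, uint32_t vm_id, uint32_t handle,
		     uint64_t addr, uint64_t range);

int bind_bo_aligned(int fd, uint32_t vm_id, uint32_t handle,
		    uint64_t addr, uint64_t range, uint64_t min_page)
{
	/*
	 * Per the documentation change, both the GPU VA and the range must
	 * be aligned to the region's min_page_size (assumed here to be a
	 * non-zero power of two), or the affected ioctl returns -EINVAL.
	 */
	if ((addr | range) & (min_page - 1))
		return -EINVAL;

	return xe_vm_bind_range(fd, vm_id, handle, addr, range);
}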