Commit 3b97e3b2 authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Use a flags field instead of bools for VMA create

Use a flags field instead of several bools for VMA create as it is
easier to read and less bug prone.
Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 35705e32
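
Before the diff, a minimal, self-contained userspace sketch of the pattern this patch applies: collapsing several bool parameters into one flags word. This is not the xe driver API; the demo_* names are hypothetical and BIT() is defined locally here as a stand-in for the kernel's <linux/bits.h>.

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))	/* local stand-in for <linux/bits.h> */

#define DEMO_FLAG_READ_ONLY	BIT(0)
#define DEMO_FLAG_IS_NULL	BIT(1)

/* Before: every new property widens the parameter list, and a caller can
 * transpose two adjacent bools without any compiler diagnostic. */
static void demo_create_bools(bool read_only, bool is_null)
{
	printf("bools: read_only=%d is_null=%d\n", read_only, is_null);
}

/* After: a single flags word; decoding into locals up front keeps the
 * function body as readable as before. */
static void demo_create_flags(unsigned int flags)
{
	bool read_only = (flags & DEMO_FLAG_READ_ONLY);
	bool is_null = (flags & DEMO_FLAG_IS_NULL);

	printf("flags: read_only=%d is_null=%d\n", read_only, is_null);
}

int main(void)
{
	unsigned int flags = 0;

	/* Callers accumulate flags conditionally, mirroring the patch. */
	flags |= DEMO_FLAG_READ_ONLY;

	demo_create_bools(true, false);
	demo_create_flags(flags);
	return 0;
}

The flags variant also extends without touching every call site: a new property is a new bit, not a new parameter.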
@@ -778,17 +778,20 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 	return fence;
 }
 
+#define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
+#define VMA_CREATE_FLAG_IS_NULL		BIT(1)
+
 static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 				    struct xe_bo *bo,
 				    u64 bo_offset_or_userptr,
 				    u64 start, u64 end,
-				    bool read_only,
-				    bool is_null,
-				    u16 pat_index)
+				    u16 pat_index, unsigned int flags)
 {
 	struct xe_vma *vma;
 	struct xe_tile *tile;
 	u8 id;
+	bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
+	bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
 
 	xe_assert(vm->xe, start < end);
 	xe_assert(vm->xe, end < vm->size);
@@ -2117,7 +2120,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 }
 
 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
-			      bool read_only, bool is_null, u16 pat_index)
+			      u16 pat_index, unsigned int flags)
 {
 	struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
 	struct drm_exec exec;
@@ -2146,8 +2149,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 	}
 	vma = xe_vma_create(vm, bo, op->gem.offset,
 			    op->va.addr, op->va.addr +
-			    op->va.range - 1, read_only, is_null,
-			    pat_index);
+			    op->va.range - 1, pat_index, flags);
 	if (bo)
 		drm_exec_fini(&exec);
@@ -2272,7 +2274,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 
 	drm_gpuva_for_each_op(__op, ops) {
 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+		struct xe_vma *vma;
 		bool first = list_empty(ops_list);
+		unsigned int flags = 0;
 
 		INIT_LIST_HEAD(&op->link);
 		list_add_tail(&op->link, ops_list);
@@ -2288,10 +2292,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 		switch (op->base.op) {
 		case DRM_GPUVA_OP_MAP:
 		{
-			struct xe_vma *vma;
+			flags |= op->map.read_only ?
+				VMA_CREATE_FLAG_READ_ONLY : 0;
+			flags |= op->map.is_null ?
+				VMA_CREATE_FLAG_IS_NULL : 0;
 
-			vma = new_vma(vm, &op->base.map, op->map.read_only,
-				      op->map.is_null, op->map.pat_index);
+			vma = new_vma(vm, &op->base.map, op->map.pat_index,
+				      flags);
 			if (IS_ERR(vma))
 				return PTR_ERR(vma);
@@ -2307,16 +2314,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 			op->remap.range = xe_vma_size(old);
 
 			if (op->base.remap.prev) {
-				struct xe_vma *vma;
-				bool read_only =
-					op->base.remap.unmap->va->flags &
-					XE_VMA_READ_ONLY;
-				bool is_null =
-					op->base.remap.unmap->va->flags &
-					DRM_GPUVA_SPARSE;
-
-				vma = new_vma(vm, op->base.remap.prev, read_only,
-					      is_null, old->pat_index);
+				flags |= op->base.remap.unmap->va->flags &
+					XE_VMA_READ_ONLY ?
+					VMA_CREATE_FLAG_READ_ONLY : 0;
+				flags |= op->base.remap.unmap->va->flags &
+					DRM_GPUVA_SPARSE ?
+					VMA_CREATE_FLAG_IS_NULL : 0;
+
+				vma = new_vma(vm, op->base.remap.prev,
+					      old->pat_index, flags);
 				if (IS_ERR(vma))
 					return PTR_ERR(vma);
@@ -2339,17 +2345,15 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 			}
 
 			if (op->base.remap.next) {
-				struct xe_vma *vma;
-				bool read_only =
-					op->base.remap.unmap->va->flags &
-					XE_VMA_READ_ONLY;
-
-				bool is_null =
-					op->base.remap.unmap->va->flags &
-					DRM_GPUVA_SPARSE;
-
-				vma = new_vma(vm, op->base.remap.next, read_only,
-					      is_null, old->pat_index);
+				flags |= op->base.remap.unmap->va->flags &
+					XE_VMA_READ_ONLY ?
+					VMA_CREATE_FLAG_READ_ONLY : 0;
+				flags |= op->base.remap.unmap->va->flags &
+					DRM_GPUVA_SPARSE ?
+					VMA_CREATE_FLAG_IS_NULL : 0;
+
+				vma = new_vma(vm, op->base.remap.next,
+					      old->pat_index, flags);
 				if (IS_ERR(vma))
 					return PTR_ERR(vma);
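
One detail worth noting in the remap hunks above: the conditional-OR accumulation writes expressions like `va->flags & XE_VMA_READ_ONLY ? VMA_CREATE_FLAG_READ_ONLY : 0`, which relies on `&` binding tighter than `?:` in C, so no extra parentheses are needed. A standalone illustration of just that parse, with hypothetical flag names:

#include <assert.h>

#define SRC_RO	(1u << 3)	/* hypothetical source flag, like XE_VMA_READ_ONLY */
#define DST_RO	(1u << 0)	/* hypothetical destination flag */

int main(void)
{
	unsigned int src = SRC_RO, dst = 0;

	/* Parses as (src & SRC_RO) ? DST_RO : 0, because & binds
	 * tighter than ?: — the same idiom the patch uses. */
	dst |= src & SRC_RO ? DST_RO : 0;

	assert(dst == DST_RO);
	return 0;
}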