Commit 84a1ed5e authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe/uapi: Remove unused flags

These cases were missed in previous uAPI cleanups. They were mostly brought in
accidentally from i915 or created to exercise the possibilities of gpuvm, but
they are not used by userspace yet, so let's remove them. They can still be
brought back later if needed.

v2:
- Fix XE_VM_FLAG_FAULT_MODE support in xe_lrc.c (Brian Welty)
- Leave DRM_XE_VM_BIND_OP_UNMAP_ALL (José Roberto de Souza)
- Ensure invalid flag values are rejected (Rodrigo Vivi)

v3: Rebase after removal of persistent exec_queues (Francois Dugast)

v4: Rebase after the new dumpable flag (Rodrigo Vivi)

Fixes: dd08ebf6 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240222232356.175431-1-rodrigo.vivi@intel.com
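To make the userspace-visible effect concrete, here is a minimal hedged sketch (not part of the patch; the helper name is hypothetical and the struct/field names follow my reading of xe_drm.h): a bind that still carries the removed READONLY bit is now rejected with EINVAL, since only NULL and DUMPABLE remain in SUPPORTED_FLAGS.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Hedged sketch: a NULL binding that also sets the old READONLY bit. */
static int try_stale_readonly_bind(int fd, uint32_t vm_id, uint64_t addr, uint64_t range)
{
	struct drm_xe_vm_bind bind;

	memset(&bind, 0, sizeof(bind));
	bind.vm_id = vm_id;
	bind.num_binds = 1;
	bind.bind.op = DRM_XE_VM_BIND_OP_MAP;
	bind.bind.addr = addr;
	bind.bind.range = range;
	/* (1 << 0) used to be DRM_XE_VM_BIND_FLAG_READONLY before this patch. */
	bind.bind.flags = DRM_XE_VM_BIND_FLAG_NULL | (1 << 0);

	/* Expected to fail with EINVAL now that the bit is outside SUPPORTED_FLAGS. */
	return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind) ? -errno : 0;
}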
@@ -354,91 +354,6 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
return 0;
}
static int exec_queue_set_preemption_timeout(struct xe_device *xe,
struct xe_exec_queue *q, u64 value,
bool create)
{
u32 min = 0, max = 0;
xe_exec_queue_get_prop_minmax(q->hwe->eclass,
XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);
if (xe_exec_queue_enforce_schedule_limit() &&
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
if (!create)
return q->ops->set_preempt_timeout(q, value);
q->sched_props.preempt_timeout_us = value;
return 0;
}
static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
u32 min = 0, max = 0;
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
xe_exec_queue_get_prop_minmax(q->hwe->eclass,
XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);
if (xe_exec_queue_enforce_schedule_limit() &&
!xe_hw_engine_timeout_in_range(value, min, max))
return -EINVAL;
q->sched_props.job_timeout_ms = value;
return 0;
}
static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
q->usm.acc_trigger = value;
return 0;
}
static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
q->usm.acc_notify = value;
return 0;
}
static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
return -EINVAL;
if (value > DRM_XE_ACC_GRANULARITY_64M)
return -EINVAL;
q->usm.acc_granularity = value;
return 0;
}
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value, bool create);
@@ -446,11 +361,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -469,7 +379,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, ext.property >=
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
XE_IOCTL_DBG(xe, ext.pad))
XE_IOCTL_DBG(xe, ext.pad) ||
XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
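The stricter check above means only PRIORITY and TIMESLICE survive as exec queue properties. A hedged userspace sketch of the consequence (hypothetical helper, same headers as the sketch above; property id 5 was the former DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER):

static int try_removed_property(int fd, __u32 vm_id)
{
	struct drm_xe_ext_set_property ext = {
		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
		.property = 5, /* former ..._ACC_TRIGGER, no longer accepted */
		.value = 1,
	};
	struct drm_xe_exec_queue_create create = {
		.extensions = (__u64)(uintptr_t)&ext,
		.vm_id = vm_id,
		/* width, num_placements and instances omitted in this sketch */
	};

	/* Expected: EINVAL, since the property id now fails validation. */
	return ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create) ? -errno : 0;
}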
@@ -141,16 +141,6 @@ struct xe_exec_queue {
spinlock_t lock;
} compute;
/** @usm: unified shared memory state */
struct {
/** @usm.acc_trigger: access counter trigger */
u32 acc_trigger;
/** @usm.acc_notify: access counter notify */
u32 acc_notify;
/** @usm.acc_granularity: access counter granularity */
u32 acc_granularity;
} usm;
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
@@ -706,8 +706,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
#define PVC_CTX_ASID (0x2e + 1)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
#define ACC_GRANULARITY_S 20
#define ACC_NOTIFY_S 16
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
@@ -778,13 +776,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
if (xe->info.has_asid && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
(q->usm.acc_granularity <<
ACC_GRANULARITY_S) | vm->usm.asid);
if (xe->info.has_usm && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
(q->usm.acc_notify << ACC_NOTIFY_S) |
q->usm.acc_trigger);
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
@@ -2131,10 +2131,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.immediate =
flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
op->map.read_only =
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
@@ -2329,8 +2325,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
flags |= op->map.read_only ?
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->map.dumpable ?
@@ -2475,7 +2469,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
op->map.immediate || !xe_vm_in_fault_mode(vm),
!xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
@@ -2750,9 +2744,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
#define SUPPORTED_FLAGS \
(DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
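For reference, this trimmed mask is what enforces the rejection promised in v2 for VM binds: the bind-ioctl argument check in xe_vm.c masks each operation's flags against SUPPORTED_FLAGS, roughly like the following sketch (a paraphrase, not a quote of the surrounding code):

	/* Any stale bit, e.g. the removed READONLY (1 << 0) or IMMEDIATE (1 << 1),
	 * now falls outside SUPPORTED_FLAGS and fails the ioctl.
	 */
	if (XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS))
		return -EINVAL;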
@@ -294,10 +294,6 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
/** @immediate: Immediate bind */
bool immediate;
/** @read_only: Read only */
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @dumpable: whether BO is dumped on GPU hang */
@@ -862,10 +862,6 @@ struct drm_xe_vm_destroy {
* - %DRM_XE_VM_BIND_OP_PREFETCH
*
* and the @flags can be:
* - %DRM_XE_VM_BIND_FLAG_READONLY
* - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - Valid on a faulting VM only, do the
* MAP operation immediately rather than deferring the MAP to the page
* fault handler.
* - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
* tables are setup with a special bit which indicates writes are
* dropped and all reads return zero. In the future, the NULL flags
@@ -958,8 +954,6 @@ struct drm_xe_vm_bind_op {
/** @op: Bind operation to perform */
__u32 op;
#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
/** @flags: Bind flags */
@@ -1076,19 +1070,6 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
/* Monitor 128KB contiguous region with 4K sub-granularity */
#define DRM_XE_ACC_GRANULARITY_128K 0
/* Monitor 2MB contiguous region with 64KB sub-granularity */
#define DRM_XE_ACC_GRANULARITY_2M 1
/* Monitor 16MB contiguous region with 512KB sub-granularity */
#define DRM_XE_ACC_GRANULARITY_16M 2
/* Monitor 64MB contiguous region with 2M sub-granularity */
#define DRM_XE_ACC_GRANULARITY_64M 3
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;