Commit f1a9abc0 authored by Thomas Hellström

drm/xe/uapi: Remove support for persistent exec_queues

Persistent exec_queues delay explicit destruction of exec_queues
until they are done executing, but destruction on process exit
is still immediate. It turns out no UMD is relying on this
functionality, so remove it. If there turns out to be a use-case
in the future, let's re-add.

Persistent exec_queues were never used for LR VMs

v2:
- Don't add an "UNUSED" define for the missing property
  (Lucas, Rodrigo)
v3:
- Remove the remaining struct xe_exec_queue::persistent state
  (Niranjana, Lucas)

Fixes: dd08ebf6 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: David Airlie <airlied@gmail.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240209113444.8396-1-thomas.hellstrom@linux.intel.com
parent f2c9364d
...@@ -86,9 +86,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) ...@@ -86,9 +86,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
return 0; return 0;
} }
static void device_kill_persistent_exec_queues(struct xe_device *xe,
struct xe_file *xef);
static void xe_file_close(struct drm_device *dev, struct drm_file *file) static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{ {
struct xe_device *xe = to_xe_device(dev); struct xe_device *xe = to_xe_device(dev);
...@@ -105,8 +102,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file) ...@@ -105,8 +102,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
mutex_unlock(&xef->exec_queue.lock); mutex_unlock(&xef->exec_queue.lock);
xa_destroy(&xef->exec_queue.xa); xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock); mutex_destroy(&xef->exec_queue.lock);
device_kill_persistent_exec_queues(xe, xef);
mutex_lock(&xef->vm.lock); mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm) xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm); xe_vm_close_and_put(vm);
...@@ -258,9 +253,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, ...@@ -258,9 +253,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid); xa_erase(&xe->usm.asid_to_vm, asid);
} }
drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
INIT_LIST_HEAD(&xe->persistent_engines.list);
spin_lock_init(&xe->pinned.lock); spin_lock_init(&xe->pinned.lock);
INIT_LIST_HEAD(&xe->pinned.kernel_bo_present); INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
INIT_LIST_HEAD(&xe->pinned.external_vram); INIT_LIST_HEAD(&xe->pinned.external_vram);
...@@ -599,37 +591,6 @@ void xe_device_shutdown(struct xe_device *xe) ...@@ -599,37 +591,6 @@ void xe_device_shutdown(struct xe_device *xe)
{ {
} }
void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
{
mutex_lock(&xe->persistent_engines.lock);
list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
mutex_unlock(&xe->persistent_engines.lock);
}
void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
struct xe_exec_queue *q)
{
mutex_lock(&xe->persistent_engines.lock);
if (!list_empty(&q->persistent.link))
list_del(&q->persistent.link);
mutex_unlock(&xe->persistent_engines.lock);
}
static void device_kill_persistent_exec_queues(struct xe_device *xe,
struct xe_file *xef)
{
struct xe_exec_queue *q, *next;
mutex_lock(&xe->persistent_engines.lock);
list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
persistent.link)
if (q->persistent.xef == xef) {
xe_exec_queue_kill(q);
list_del_init(&q->persistent.link);
}
mutex_unlock(&xe->persistent_engines.lock);
}
void xe_device_wmb(struct xe_device *xe) void xe_device_wmb(struct xe_device *xe)
{ {
struct xe_gt *gt = xe_root_mmio_gt(xe); struct xe_gt *gt = xe_root_mmio_gt(xe);
......
...@@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe); ...@@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe); void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe); void xe_device_shutdown(struct xe_device *xe);
void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
struct xe_exec_queue *q);
void xe_device_wmb(struct xe_device *xe); void xe_device_wmb(struct xe_device *xe);
static inline struct xe_file *to_xe_file(const struct drm_file *file) static inline struct xe_file *to_xe_file(const struct drm_file *file)
......
...@@ -348,14 +348,6 @@ struct xe_device { ...@@ -348,14 +348,6 @@ struct xe_device {
struct mutex lock; struct mutex lock;
} usm; } usm;
/** @persistent_engines: engines that are closed but still running */
struct {
/** @persistent_engines.lock: protects persistent engines */
struct mutex lock;
/** @persistent_engines.list: list of persistent engines */
struct list_head list;
} persistent_engines;
/** @pinned: pinned BO state */ /** @pinned: pinned BO state */
struct { struct {
/** @pinned.lock: protected pinned BO list state */ /** @pinned.lock: protected pinned BO list state */
......
...@@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, ...@@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->fence_irq = &gt->fence_irq[hwe->class]; q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class]; q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops; q->ops = gt->exec_queue_ops;
INIT_LIST_HEAD(&q->persistent.link);
INIT_LIST_HEAD(&q->compute.link); INIT_LIST_HEAD(&q->compute.link);
INIT_LIST_HEAD(&q->multi_gt_link); INIT_LIST_HEAD(&q->multi_gt_link);
...@@ -375,23 +374,6 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe, ...@@ -375,23 +374,6 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe,
return 0; return 0;
} }
static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create)
{
if (XE_IOCTL_DBG(xe, !create))
return -EINVAL;
if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
return -EINVAL;
if (value)
q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
else
q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
return 0;
}
static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q, static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
u64 value, bool create) u64 value, bool create)
{ {
...@@ -465,7 +447,6 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = { ...@@ -465,7 +447,6 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify, [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
...@@ -492,6 +473,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe, ...@@ -492,6 +473,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
return -EINVAL; return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs)); idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
if (!exec_queue_set_property_funcs[idx])
return -EINVAL;
return exec_queue_set_property_funcs[idx](xe, q, ext.value, create); return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
} }
...@@ -703,8 +687,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, ...@@ -703,8 +687,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
/* The migration vm doesn't hold rpm ref */ /* The migration vm doesn't hold rpm ref */
xe_device_mem_access_get(xe); xe_device_mem_access_get(xe);
flags = EXEC_QUEUE_FLAG_PERSISTENT | EXEC_QUEUE_FLAG_VM | flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
(id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate); migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
new = xe_exec_queue_create(xe, migrate_vm, logical_mask, new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
...@@ -755,9 +738,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, ...@@ -755,9 +738,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
} }
q = xe_exec_queue_create(xe, vm, logical_mask, q = xe_exec_queue_create(xe, vm, logical_mask,
args->width, hwe, args->width, hwe, 0,
xe_vm_in_lr_mode(vm) ? 0 :
EXEC_QUEUE_FLAG_PERSISTENT,
args->extensions); args->extensions);
up_read(&vm->lock); up_read(&vm->lock);
xe_vm_put(vm); xe_vm_put(vm);
...@@ -774,8 +755,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, ...@@ -774,8 +755,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
} }
} }
q->persistent.xef = xef;
mutex_lock(&xef->exec_queue.lock); mutex_lock(&xef->exec_queue.lock);
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->exec_queue.lock); mutex_unlock(&xef->exec_queue.lock);
...@@ -918,10 +897,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data, ...@@ -918,10 +897,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !q)) if (XE_IOCTL_DBG(xe, !q))
return -ENOENT; return -ENOENT;
if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT)) xe_exec_queue_kill(q);
xe_exec_queue_kill(q);
else
xe_device_add_persistent_exec_queues(xe, q);
trace_xe_exec_queue_close(q); trace_xe_exec_queue_close(q);
xe_exec_queue_put(q); xe_exec_queue_put(q);
......
...@@ -105,16 +105,6 @@ struct xe_exec_queue { ...@@ -105,16 +105,6 @@ struct xe_exec_queue {
struct xe_guc_exec_queue *guc; struct xe_guc_exec_queue *guc;
}; };
/**
* @persistent: persistent exec queue state
*/
struct {
/** @persistent.xef: file which this exec queue belongs to */
struct xe_file *xef;
/** @persisiten.link: link in list of persistent exec queues */
struct list_head link;
} persistent;
/** /**
* @parallel: parallel submission state * @parallel: parallel submission state
*/ */
......
...@@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w) ...@@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link); list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags); spin_unlock_irqrestore(&exl->port->lock, flags);
if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
xe_device_remove_persistent_exec_queues(xe, q);
drm_sched_entity_fini(&exl->entity); drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched); drm_sched_fini(&exl->sched);
kfree(exl); kfree(exl);
......
...@@ -1031,8 +1031,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w) ...@@ -1031,8 +1031,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q)) if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr); cancel_work_sync(&ge->lr_tdr);
if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
release_guc_id(guc, q); release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity); xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched); xe_sched_fini(&ge->sched);
......
...@@ -1076,7 +1076,6 @@ struct drm_xe_exec_queue_create { ...@@ -1076,7 +1076,6 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment