Commit 9b9529ce authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe: Rename engine to exec_queue

Engine was inappropriately used to refer to execution queues, and it
also created some confusion with hardware engines. Where it applies,
the exec_queue variable name is changed to q and comments are also
updated.

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/162
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent c22a4ed0
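To make the scope of the rename concrete, the fragment below is a minimal, illustrative sketch of kernel-internal usage after this patch. It uses only names that appear in the diff (xe_exec_queue_create_class(), EXEC_QUEUE_FLAG_KERNEL, xe_exec_queue_put()); the surrounding xe, gt and vm pointers are assumed, and error handling is trimmed.

struct xe_exec_queue *q;

/* was: xe_engine_create_class(xe, gt, vm, XE_ENGINE_CLASS_COPY, ENGINE_FLAG_KERNEL) */
q = xe_exec_queue_create_class(xe, gt, vm, XE_ENGINE_CLASS_COPY,
			       EXEC_QUEUE_FLAG_KERNEL);
if (IS_ERR(q))
	return PTR_ERR(q);

/* ... build and submit jobs against q ... */

xe_exec_queue_put(q);	/* was: xe_engine_put(e) */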
......@@ -38,7 +38,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
struct kunit *test)
{
u64 batch_base = xe_migrate_batch_base(m, xe->info.supports_usm);
struct xe_sched_job *job = xe_bb_create_migration_job(m->eng, bb,
struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
batch_base,
second_idx);
struct dma_fence *fence;
......@@ -215,7 +215,7 @@ static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);
then = ktime_get();
fence = xe_migrate_update_pgtables(m, NULL, NULL, m->eng, &update, 1,
fence = xe_migrate_update_pgtables(m, NULL, NULL, m->q, &update, 1,
NULL, 0, &pt_update);
now = ktime_get();
if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
......@@ -257,7 +257,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
return;
}
big = xe_bo_create_pin_map(xe, tile, m->eng->vm, SZ_4M,
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_PINNED_BIT);
......@@ -266,7 +266,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
goto vunmap;
}
pt = xe_bo_create_pin_map(xe, tile, m->eng->vm, XE_PAGE_SIZE,
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_PINNED_BIT);
......@@ -276,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
goto free_big;
}
tiny = xe_bo_create_pin_map(xe, tile, m->eng->vm,
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
......@@ -295,14 +295,14 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
}
kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
(unsigned long)xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, XE_PAGE_SIZE),
(unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
(unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));
/* First part of the test, are we updating our pagetable bo with a new entry? */
xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
0xdeaddeadbeefbeef);
expected = xe_pte_encode(pt, 0, XE_CACHE_WB, 0);
if (m->eng->vm->flags & XE_VM_FLAG_64K)
if (m->q->vm->flags & XE_VM_FLAG_64K)
expected |= XE_PTE_PS64;
if (xe_bo_is_vram(pt))
xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
......@@ -399,11 +399,11 @@ static int migrate_test_run_device(struct xe_device *xe)
struct ww_acquire_ctx ww;
kunit_info(test, "Testing tile id %d.\n", id);
xe_vm_lock(m->eng->vm, &ww, 0, true);
xe_vm_lock(m->q->vm, &ww, 0, true);
xe_device_mem_access_get(xe);
xe_migrate_sanity_test(m, test);
xe_device_mem_access_put(xe);
xe_vm_unlock(m->eng->vm, &ww);
xe_vm_unlock(m->q->vm, &ww);
}
return 0;
......
......@@ -7,7 +7,7 @@
#include "regs/xe_gpu_commands.h"
#include "xe_device.h"
#include "xe_engine_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
#include "xe_sa.h"
......@@ -60,30 +60,30 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
}
static struct xe_sched_job *
__xe_bb_create_job(struct xe_engine *kernel_eng, struct xe_bb *bb, u64 *addr)
__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
{
u32 size = drm_suballoc_size(bb->bo);
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
WARN_ON(bb->len * 4 + bb_prefetch(kernel_eng->gt) > size);
WARN_ON(bb->len * 4 + bb_prefetch(q->gt) > size);
xe_sa_bo_flush_write(bb->bo);
return xe_sched_job_create(kernel_eng, addr);
return xe_sched_job_create(q, addr);
}
struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
struct xe_bb *bb, u64 batch_base_ofs)
{
u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);
XE_WARN_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION));
XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
return __xe_bb_create_job(wa_eng, bb, &addr);
return __xe_bb_create_job(q, bb, &addr);
}
struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
struct xe_bb *bb,
u64 batch_base_ofs,
u32 second_idx)
......@@ -95,18 +95,18 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
};
XE_WARN_ON(second_idx > bb->len);
XE_WARN_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION));
XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
return __xe_bb_create_job(kernel_eng, bb, addr);
return __xe_bb_create_job(q, bb, addr);
}
struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
struct xe_bb *bb)
{
u64 addr = xe_sa_bo_gpu_addr(bb->bo);
XE_WARN_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION);
return __xe_bb_create_job(kernel_eng, bb, &addr);
XE_WARN_ON(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION);
return __xe_bb_create_job(q, bb, &addr);
}
void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence)
......
......@@ -11,16 +11,16 @@
struct dma_fence;
struct xe_gt;
struct xe_engine;
struct xe_exec_queue;
struct xe_sched_job;
struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
struct xe_bb *bb);
struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
struct xe_bb *bb, u64 batch_ofs,
u32 second_idx);
struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
struct xe_bb *bb, u64 batch_ofs);
void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);
......
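As an illustration of the renamed batch-buffer helpers declared above, here is a hedged sketch of building a batch buffer and turning it into a job on an exec queue. The dword count and the MI_NOOP payload are placeholders; __xe_bb_create_job() appends MI_BATCH_BUFFER_END itself, and job arming/submission is elided.

struct xe_bb *bb;
struct xe_sched_job *job;

bb = xe_bb_new(gt, 4, false);		/* 4 dwords from the non-USM pool */
if (IS_ERR(bb))
	return PTR_ERR(bb);

bb->cs[bb->len++] = MI_NOOP;		/* placeholder payload */

job = xe_bb_create_job(q, bb);		/* was: xe_bb_create_job(kernel_eng, bb) */
if (IS_ERR(job)) {
	xe_bb_free(bb, NULL);
	return PTR_ERR(job);
}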
......@@ -53,9 +53,9 @@ static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
return container_of(coredump, struct xe_device, devcoredump);
}
static struct xe_guc *engine_to_guc(struct xe_engine *e)
static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
{
return &e->gt->uc.guc;
return &q->gt->uc.guc;
}
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
......@@ -91,7 +91,7 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
drm_printf(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
xe_guc_engine_snapshot_print(coredump->snapshot.ge, &p);
xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
drm_printf(&p, "\n**** HW Engines ****\n");
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
......@@ -112,7 +112,7 @@ static void xe_devcoredump_free(void *data)
return;
xe_guc_ct_snapshot_free(coredump->snapshot.ct);
xe_guc_engine_snapshot_free(coredump->snapshot.ge);
xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
if (coredump->snapshot.hwe[i])
xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
......@@ -123,14 +123,14 @@ static void xe_devcoredump_free(void *data)
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
struct xe_engine *e)
struct xe_exec_queue *q)
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
struct xe_guc *guc = engine_to_guc(e);
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
u32 adj_logical_mask = e->logical_mask;
u32 width_mask = (0x1 << e->width) - 1;
u32 adj_logical_mask = q->logical_mask;
u32 width_mask = (0x1 << q->width) - 1;
int i;
bool cookie;
......@@ -138,22 +138,22 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
ss->boot_time = ktime_get_boottime();
cookie = dma_fence_begin_signalling();
for (i = 0; e->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
if (adj_logical_mask & BIT(i)) {
adj_logical_mask |= width_mask << i;
i += e->width;
i += q->width;
} else {
++i;
}
}
xe_force_wake_get(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
coredump->snapshot.ge = xe_guc_engine_snapshot_capture(e);
coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
for_each_hw_engine(hwe, e->gt, id) {
if (hwe->class != e->hwe->class ||
for_each_hw_engine(hwe, q->gt, id) {
if (hwe->class != q->hwe->class ||
!(BIT(hwe->logical_instance) & adj_logical_mask)) {
coredump->snapshot.hwe[id] = NULL;
continue;
......@@ -161,21 +161,21 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
}
xe_force_wake_put(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
dma_fence_end_signalling(cookie);
}
/**
* xe_devcoredump - Take the required snapshots and initialize coredump device.
* @e: The faulty xe_engine, where the issue was detected.
* @q: The faulty xe_exec_queue, where the issue was detected.
*
* This function should be called at the crash time within the serialized
* gt_reset. It is skipped if we still have the core dump device available
* with the information of the 'first' snapshot.
*/
void xe_devcoredump(struct xe_engine *e)
void xe_devcoredump(struct xe_exec_queue *q)
{
struct xe_device *xe = gt_to_xe(e->gt);
struct xe_device *xe = gt_to_xe(q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
if (coredump->captured) {
......@@ -184,7 +184,7 @@ void xe_devcoredump(struct xe_engine *e)
}
coredump->captured = true;
devcoredump_snapshot(coredump, e);
devcoredump_snapshot(coredump, q);
drm_info(&xe->drm, "Xe device coredump has been created\n");
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
......
......@@ -7,12 +7,12 @@
#define _XE_DEVCOREDUMP_H_
struct xe_device;
struct xe_engine;
struct xe_exec_queue;
#ifdef CONFIG_DEV_COREDUMP
void xe_devcoredump(struct xe_engine *e);
void xe_devcoredump(struct xe_exec_queue *q);
#else
static inline void xe_devcoredump(struct xe_engine *e)
static inline void xe_devcoredump(struct xe_exec_queue *q)
{
}
#endif
......
......@@ -30,7 +30,7 @@ struct xe_devcoredump_snapshot {
/** @ct: GuC CT snapshot */
struct xe_guc_ct_snapshot *ct;
/** @ge: Guc Engine snapshot */
struct xe_guc_submit_engine_snapshot *ge;
struct xe_guc_submit_exec_queue_snapshot *ge;
/** @hwe: HW Engine snapshot array */
struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
};
......
......@@ -53,33 +53,33 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
mutex_init(&xef->vm.lock);
xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);
mutex_init(&xef->engine.lock);
xa_init_flags(&xef->engine.xa, XA_FLAGS_ALLOC1);
mutex_init(&xef->exec_queue.lock);
xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
file->driver_priv = xef;
return 0;
}
static void device_kill_persistent_engines(struct xe_device *xe,
struct xe_file *xef);
static void device_kill_persistent_exec_queues(struct xe_device *xe,
struct xe_file *xef);
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = file->driver_priv;
struct xe_vm *vm;
struct xe_engine *e;
struct xe_exec_queue *q;
unsigned long idx;
mutex_lock(&xef->engine.lock);
xa_for_each(&xef->engine.xa, idx, e) {
xe_engine_kill(e);
xe_engine_put(e);
mutex_lock(&xef->exec_queue.lock);
xa_for_each(&xef->exec_queue.xa, idx, q) {
xe_exec_queue_kill(q);
xe_exec_queue_put(q);
}
mutex_unlock(&xef->engine.lock);
xa_destroy(&xef->engine.xa);
mutex_destroy(&xef->engine.lock);
device_kill_persistent_engines(xe, xef);
mutex_unlock(&xef->exec_queue.lock);
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
device_kill_persistent_exec_queues(xe, xef);
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
......@@ -99,15 +99,15 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_ENGINE_CREATE, xe_engine_create_ioctl,
DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_ENGINE_GET_PROPERTY, xe_engine_get_property_ioctl,
DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_ENGINE_DESTROY, xe_engine_destroy_ioctl,
DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_MMIO, xe_mmio_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_ENGINE_SET_PROPERTY, xe_engine_set_property_ioctl,
DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
DRM_RENDER_ALLOW),
......@@ -324,33 +324,33 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e)
void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
{
mutex_lock(&xe->persistent_engines.lock);
list_add_tail(&e->persistent.link, &xe->persistent_engines.list);
list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
mutex_unlock(&xe->persistent_engines.lock);
}
void xe_device_remove_persistent_engines(struct xe_device *xe,
struct xe_engine *e)
void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
struct xe_exec_queue *q)
{
mutex_lock(&xe->persistent_engines.lock);
if (!list_empty(&e->persistent.link))
list_del(&e->persistent.link);
if (!list_empty(&q->persistent.link))
list_del(&q->persistent.link);
mutex_unlock(&xe->persistent_engines.lock);
}
static void device_kill_persistent_engines(struct xe_device *xe,
struct xe_file *xef)
static void device_kill_persistent_exec_queues(struct xe_device *xe,
struct xe_file *xef)
{
struct xe_engine *e, *next;
struct xe_exec_queue *q, *next;
mutex_lock(&xe->persistent_engines.lock);
list_for_each_entry_safe(e, next, &xe->persistent_engines.list,
list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
persistent.link)
if (e->persistent.xef == xef) {
xe_engine_kill(e);
list_del_init(&e->persistent.link);
if (q->persistent.xef == xef) {
xe_exec_queue_kill(q);
list_del_init(&q->persistent.link);
}
mutex_unlock(&xe->persistent_engines.lock);
}
......
......@@ -6,7 +6,7 @@
#ifndef _XE_DEVICE_H_
#define _XE_DEVICE_H_
struct xe_engine;
struct xe_exec_queue;
struct xe_file;
#include <drm/drm_util.h>
......@@ -41,9 +41,9 @@ int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);
void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e);
void xe_device_remove_persistent_engines(struct xe_device *xe,
struct xe_engine *e);
void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
struct xe_exec_queue *q);
void xe_device_wmb(struct xe_device *xe);
......
......@@ -377,13 +377,13 @@ struct xe_file {
struct mutex lock;
} vm;
/** @engine: Submission engine state for file */
/** @exec_queue: Submission exec queue state for file */
struct {
/** @xe: xarray to store engines */
struct xarray xa;
/** @lock: protects file engine state */
struct mutex lock;
} engine;
} exec_queue;
};
#endif
......@@ -95,19 +95,19 @@
#define XE_EXEC_BIND_RETRY_TIMEOUT_MS 1000
static int xe_exec_begin(struct xe_engine *e, struct ww_acquire_ctx *ww,
static int xe_exec_begin(struct xe_exec_queue *q, struct ww_acquire_ctx *ww,
struct ttm_validate_buffer tv_onstack[],
struct ttm_validate_buffer **tv,
struct list_head *objs)
{
struct xe_vm *vm = e->vm;
struct xe_vm *vm = q->vm;
struct xe_vma *vma;
LIST_HEAD(dups);
ktime_t end = 0;
int err = 0;
*tv = NULL;
if (xe_vm_no_dma_fences(e->vm))
if (xe_vm_no_dma_fences(q->vm))
return 0;
retry:
......@@ -153,14 +153,14 @@ static int xe_exec_begin(struct xe_engine *e, struct ww_acquire_ctx *ww,
return err;
}
static void xe_exec_end(struct xe_engine *e,
static void xe_exec_end(struct xe_exec_queue *q,
struct ttm_validate_buffer *tv_onstack,
struct ttm_validate_buffer *tv,
struct ww_acquire_ctx *ww,
struct list_head *objs)
{
if (!xe_vm_no_dma_fences(e->vm))
xe_vm_unlock_dma_resv(e->vm, tv_onstack, tv, ww, objs);
if (!xe_vm_no_dma_fences(q->vm))
xe_vm_unlock_dma_resv(q->vm, tv_onstack, tv, ww, objs);
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
......@@ -170,7 +170,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_xe_exec *args = data;
struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
u64 __user *addresses_user = u64_to_user_ptr(args->address);
struct xe_engine *engine;
struct xe_exec_queue *q;
struct xe_sync_entry *syncs = NULL;
u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
......@@ -189,30 +189,30 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
engine = xe_engine_lookup(xef, args->engine_id);
if (XE_IOCTL_DBG(xe, !engine))
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_VM))
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
return -EINVAL;
if (XE_IOCTL_DBG(xe, engine->width != args->num_batch_buffer))
if (XE_IOCTL_DBG(xe, q->width != args->num_batch_buffer))
return -EINVAL;
if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_BANNED)) {
if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
err = -ECANCELED;
goto err_engine;
goto err_exec_queue;
}
if (args->num_syncs) {
syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
if (!syncs) {
err = -ENOMEM;
goto err_engine;
goto err_exec_queue;
}
}
vm = engine->vm;
vm = q->vm;
for (i = 0; i < args->num_syncs; i++) {
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
......@@ -222,9 +222,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_syncs;
}
if (xe_engine_is_parallel(engine)) {
if (xe_exec_queue_is_parallel(q)) {
err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
engine->width);
q->width);
if (err) {
err = -EFAULT;
goto err_syncs;
......@@ -294,26 +294,26 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_unlock_list;
}
err = xe_exec_begin(engine, &ww, tv_onstack, &tv, &objs);
err = xe_exec_begin(q, &ww, tv_onstack, &tv, &objs);
if (err)
goto err_unlock_list;
if (xe_vm_is_closed_or_banned(engine->vm)) {
if (xe_vm_is_closed_or_banned(q->vm)) {
drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
err = -ECANCELED;
goto err_engine_end;
goto err_exec_queue_end;
}
if (xe_engine_is_lr(engine) && xe_engine_ring_full(engine)) {
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
err = -EWOULDBLOCK;
goto err_engine_end;
goto err_exec_queue_end;
}
job = xe_sched_job_create(engine, xe_engine_is_parallel(engine) ?
job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
addresses : &args->address);
if (IS_ERR(job)) {
err = PTR_ERR(job);
goto err_engine_end;
goto err_exec_queue_end;
}
/*
......@@ -395,8 +395,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
xe_sync_entry_signal(&syncs[i], job,
&job->drm.s_fence->finished);
if (xe_engine_is_lr(engine))
engine->ring_ops->emit_job(job);
if (xe_exec_queue_is_lr(q))
q->ring_ops->emit_job(job);
xe_sched_job_push(job);
xe_vm_reactivate_rebind(vm);
......@@ -412,8 +412,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
err_put_job:
if (err)
xe_sched_job_put(job);
err_engine_end:
xe_exec_end(engine, tv_onstack, tv, &ww, &objs);
err_exec_queue_end:
xe_exec_end(q, tv_onstack, tv, &ww, &objs);
err_unlock_list:
if (write_locked)
up_write(&vm->lock);
......@@ -425,8 +425,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < num_syncs; i++)
xe_sync_entry_cleanup(&syncs[i]);
kfree(syncs);
err_engine:
xe_engine_put(engine);
err_exec_queue:
xe_exec_queue_put(q);
return err;
}
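On the uAPI side, the visible change in this ioctl is that struct drm_xe_exec now carries exec_queue_id instead of engine_id. A hedged userspace sketch follows; the field names mirror the args-> accesses above, and the DRM_IOCTL_XE_EXEC macro and fd setup are assumptions.

struct drm_xe_exec exec = {
	.exec_queue_id = exec_queue_id,	/* was: .engine_id */
	.num_batch_buffer = 1,
	.address = batch_addr,
	.num_syncs = 0,
};

if (ioctl(fd, DRM_IOCTL_XE_EXEC, &exec))
	return -errno;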
......@@ -3,10 +3,10 @@
* Copyright © 2021 Intel Corporation
*/
#ifndef _XE_ENGINE_H_
#define _XE_ENGINE_H_
#ifndef _XE_EXEC_QUEUE_H_
#define _XE_EXEC_QUEUE_H_
#include "xe_engine_types.h"
#include "xe_exec_queue_types.h"
#include "xe_vm_types.h"
struct drm_device;
......@@ -14,50 +14,50 @@ struct drm_file;
struct xe_device;
struct xe_file;
struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hw_engine, u32 flags);
struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
enum xe_engine_class class, u32 flags);
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hw_engine, u32 flags);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
enum xe_engine_class class, u32 flags);
void xe_engine_fini(struct xe_engine *e);
void xe_engine_destroy(struct kref *ref);
void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
struct xe_engine *xe_engine_lookup(struct xe_file *xef, u32 id);
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);
static inline struct xe_engine *xe_engine_get(struct xe_engine *engine)
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
{
kref_get(&engine->refcount);
return engine;
kref_get(&q->refcount);
return q;
}
static inline void xe_engine_put(struct xe_engine *engine)
static inline void xe_exec_queue_put(struct xe_exec_queue *q)
{
kref_put(&engine->refcount, xe_engine_destroy);
kref_put(&q->refcount, xe_exec_queue_destroy);
}
static inline bool xe_engine_is_parallel(struct xe_engine *engine)
static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
{
return engine->width > 1;
return q->width > 1;
}
bool xe_engine_is_lr(struct xe_engine *e);
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
bool xe_engine_ring_full(struct xe_engine *e);
bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
bool xe_engine_is_idle(struct xe_engine *engine);
bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
void xe_engine_kill(struct xe_engine *e);
void xe_exec_queue_kill(struct xe_exec_queue *q);
int xe_engine_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_engine_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_engine_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_engine_get_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
enum xe_engine_priority xe_engine_device_get_max_priority(struct xe_device *xe);
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);
#endif
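The header above also captures the reference-counting convention the ioctls rely on: xe_exec_queue_lookup() returns a reference that the caller must drop with xe_exec_queue_put(). A minimal sketch of that pattern, mirroring xe_exec_ioctl() earlier in this diff:

struct xe_exec_queue *q;

q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q))
	return -ENOENT;

/* ... validate q->flags, q->width, etc., then use q ... */

xe_exec_queue_put(q);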
......@@ -3,8 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
#ifndef _XE_ENGINE_TYPES_H_
#define _XE_ENGINE_TYPES_H_
#ifndef _XE_EXEC_QUEUE_TYPES_H_
#define _XE_EXEC_QUEUE_TYPES_H_
#include <linux/kref.h>
......@@ -15,30 +15,30 @@
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"
struct xe_execlist_engine;
struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_engine;
struct xe_guc_exec_queue;
struct xe_hw_engine;
struct xe_vm;
enum xe_engine_priority {
XE_ENGINE_PRIORITY_UNSET = -2, /* For execlist usage only */
XE_ENGINE_PRIORITY_LOW = 0,
XE_ENGINE_PRIORITY_NORMAL,
XE_ENGINE_PRIORITY_HIGH,
XE_ENGINE_PRIORITY_KERNEL,
enum xe_exec_queue_priority {
XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
XE_EXEC_QUEUE_PRIORITY_LOW = 0,
XE_EXEC_QUEUE_PRIORITY_NORMAL,
XE_EXEC_QUEUE_PRIORITY_HIGH,
XE_EXEC_QUEUE_PRIORITY_KERNEL,
XE_ENGINE_PRIORITY_COUNT
XE_EXEC_QUEUE_PRIORITY_COUNT
};
/**
* struct xe_engine - Submission engine
* struct xe_exec_queue - Execution queue
*
* Contains all state necessary for submissions. Can either be a user object or
* a kernel object.
*/
struct xe_engine {
/** @gt: graphics tile this engine can submit to */
struct xe_exec_queue {
/** @gt: graphics tile this exec queue can submit to */
struct xe_gt *gt;
/**
* @hwe: A hardware of the same class. May (physical engine) or may not
......@@ -46,36 +46,36 @@ struct xe_engine {
* really be used for submissions.
*/
struct xe_hw_engine *hwe;
/** @refcount: ref count of this engine */
/** @refcount: ref count of this exec queue */
struct kref refcount;
/** @vm: VM (address space) for this engine */
/** @vm: VM (address space) for this exec queue */
struct xe_vm *vm;
/** @class: class of this engine */
/** @class: class of this exec queue */
enum xe_engine_class class;
/** @priority: priority of this exec queue */
enum xe_engine_priority priority;
enum xe_exec_queue_priority priority;
/**
* @logical_mask: logical mask of where job submitted to engine can run
* @logical_mask: logical mask of where job submitted to exec queue can run
*/
u32 logical_mask;
/** @name: name of this engine */
/** @name: name of this exec queue */
char name[MAX_FENCE_NAME_LEN];
/** @width: width (number BB submitted per exec) of this engine */
/** @width: width (number BB submitted per exec) of this exec queue */
u16 width;
/** @fence_irq: fence IRQ used to signal job completion */
struct xe_hw_fence_irq *fence_irq;
#define ENGINE_FLAG_BANNED BIT(0)
#define ENGINE_FLAG_KERNEL BIT(1)
#define ENGINE_FLAG_PERSISTENT BIT(2)
#define ENGINE_FLAG_COMPUTE_MODE BIT(3)
/* Caller needs to hold rpm ref when creating engine with ENGINE_FLAG_VM */
#define ENGINE_FLAG_VM BIT(4)
#define ENGINE_FLAG_BIND_ENGINE_CHILD BIT(5)
#define ENGINE_FLAG_WA BIT(6)
#define EXEC_QUEUE_FLAG_BANNED BIT(0)
#define EXEC_QUEUE_FLAG_KERNEL BIT(1)
#define EXEC_QUEUE_FLAG_PERSISTENT BIT(2)
#define EXEC_QUEUE_FLAG_COMPUTE_MODE BIT(3)
/* Caller needs to hold rpm ref when creating engine with EXEC_QUEUE_FLAG_VM */
#define EXEC_QUEUE_FLAG_VM BIT(4)
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
#define EXEC_QUEUE_FLAG_WA BIT(6)
/**
* @flags: flags for this engine, should statically setup aside from ban
* @flags: flags for this exec queue, should statically setup aside from ban
* bit
*/
unsigned long flags;
......@@ -88,19 +88,19 @@ struct xe_engine {
};
union {
/** @execlist: execlist backend specific state for engine */
struct xe_execlist_engine *execlist;
/** @guc: GuC backend specific state for engine */
struct xe_guc_engine *guc;
/** @execlist: execlist backend specific state for exec queue */
struct xe_execlist_exec_queue *execlist;
/** @guc: GuC backend specific state for exec queue */
struct xe_guc_exec_queue *guc;
};
/**
* @persistent: persistent engine state
* @persistent: persistent exec queue state
*/
struct {
/** @xef: file which this engine belongs to */
/** @xef: file which this exec queue belongs to */
struct xe_file *xef;
/** @link: link in list of persistent engines */
/** @link: link in list of persistent exec queues */
struct list_head link;
} persistent;
......@@ -133,7 +133,7 @@ struct xe_engine {
u32 preempt_timeout_us;
} sched_props;
/** @compute: compute engine state */
/** @compute: compute exec queue state */
struct {
/** @pfence: preemption fence */
struct dma_fence *pfence;
......@@ -141,7 +141,7 @@ struct xe_engine {
u64 context;
/** @seqno: preemption fence seqno */
u32 seqno;
/** @link: link into VM's list of engines */
/** @link: link into VM's list of exec queues */
struct list_head link;
/** @lock: preemption fences lock */
spinlock_t lock;
......@@ -157,53 +157,53 @@ struct xe_engine {
u32 acc_granularity;
} usm;
/** @ops: submission backend engine operations */
const struct xe_engine_ops *ops;
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
/** @ring_ops: ring operations for this engine */
/** @ring_ops: ring operations for this exec queue */
const struct xe_ring_ops *ring_ops;
/** @entity: DRM sched entity for this engine (1 to 1 relationship) */
/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
struct drm_sched_entity *entity;
/** @lrc: logical ring context for this engine */
/** @lrc: logical ring context for this exec queue */
struct xe_lrc lrc[];
};
/**
* struct xe_engine_ops - Submission backend engine operations
* struct xe_exec_queue_ops - Submission backend exec queue operations
*/
struct xe_engine_ops {
/** @init: Initialize engine for submission backend */
int (*init)(struct xe_engine *e);
struct xe_exec_queue_ops {
/** @init: Initialize exec queue for submission backend */
int (*init)(struct xe_exec_queue *q);
/** @kill: Kill inflight submissions for backend */
void (*kill)(struct xe_engine *e);
/** @fini: Fini engine for submission backend */
void (*fini)(struct xe_engine *e);
/** @set_priority: Set priority for engine */
int (*set_priority)(struct xe_engine *e,
enum xe_engine_priority priority);
/** @set_timeslice: Set timeslice for engine */
int (*set_timeslice)(struct xe_engine *e, u32 timeslice_us);
/** @set_preempt_timeout: Set preemption timeout for engine */
int (*set_preempt_timeout)(struct xe_engine *e, u32 preempt_timeout_us);
/** @set_job_timeout: Set job timeout for engine */
int (*set_job_timeout)(struct xe_engine *e, u32 job_timeout_ms);
void (*kill)(struct xe_exec_queue *q);
/** @fini: Fini exec queue for submission backend */
void (*fini)(struct xe_exec_queue *q);
/** @set_priority: Set priority for exec queue */
int (*set_priority)(struct xe_exec_queue *q,
enum xe_exec_queue_priority priority);
/** @set_timeslice: Set timeslice for exec queue */
int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
/** @set_preempt_timeout: Set preemption timeout for exec queue */
int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
/** @set_job_timeout: Set job timeout for exec queue */
int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
/**
* @suspend: Suspend engine from executing, allowed to be called
* @suspend: Suspend exec queue from executing, allowed to be called
* multiple times in a row before resume with the caveat that
* suspend_wait returns before calling suspend again.
*/
int (*suspend)(struct xe_engine *e);
int (*suspend)(struct xe_exec_queue *q);
/**
* @suspend_wait: Wait for an engine to suspend executing, should be
* @suspend_wait: Wait for an exec queue to suspend executing, should be
* call after suspend.
*/
void (*suspend_wait)(struct xe_engine *e);
void (*suspend_wait)(struct xe_exec_queue *q);
/**
* @resume: Resume engine execution, engine must be in a suspended
* @resume: Resume exec queue execution, exec queue must be in a suspended
* state and dma fence returned from most recent suspend call must be
* signalled when this function is called.
*/
void (*resume)(struct xe_engine *e);
void (*resume)(struct xe_exec_queue *q);
};
#endif
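A short sketch of the suspend/resume contract documented in xe_exec_queue_ops above, matching how the preempt-fence code later in this patch drives it; waiting for the suspend fence to signal before resuming is elided.

int err;

err = q->ops->suspend(q);		/* ask the backend to preempt the queue */
if (!err)
	q->ops->suspend_wait(q);	/* must return before suspend() is called again */

/* ... once the fence from the most recent suspend has signalled ... */
q->ops->resume(q);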
......@@ -10,27 +10,27 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "xe_engine_types.h"
#include "xe_exec_queue_types.h"
struct xe_hw_engine;
struct xe_execlist_engine;
struct xe_execlist_exec_queue;
struct xe_execlist_port {
struct xe_hw_engine *hwe;
spinlock_t lock;
struct list_head active[XE_ENGINE_PRIORITY_COUNT];
struct list_head active[XE_EXEC_QUEUE_PRIORITY_COUNT];
u32 last_ctx_id;
struct xe_execlist_engine *running_exl;
struct xe_execlist_exec_queue *running_exl;
struct timer_list irq_fail;
};
struct xe_execlist_engine {
struct xe_engine *engine;
struct xe_execlist_exec_queue {
struct xe_exec_queue *q;
struct drm_gpu_scheduler sched;
......@@ -42,7 +42,7 @@ struct xe_execlist_engine {
struct work_struct fini_async;
enum xe_engine_priority active_priority;
enum xe_exec_queue_priority active_priority;
struct list_head active_link;
};
......
......@@ -26,7 +26,7 @@
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_engine_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
......@@ -81,7 +81,7 @@ static void gt_fini(struct drm_device *drm, void *arg)
static void gt_reset_worker(struct work_struct *w);
static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_sched_job *job;
struct xe_bb *bb;
......@@ -94,7 +94,7 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
return PTR_ERR(bb);
batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
job = xe_bb_create_wa_job(e, bb, batch_ofs);
job = xe_bb_create_wa_job(q, bb, batch_ofs);
if (IS_ERR(job)) {
xe_bb_free(bb, NULL);
return PTR_ERR(job);
......@@ -115,9 +115,9 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
return 0;
}
static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_reg_sr *sr = &e->hwe->reg_lrc;
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
struct xe_reg_sr_entry *entry;
unsigned long reg;
struct xe_sched_job *job;
......@@ -143,7 +143,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
}
batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
job = xe_bb_create_wa_job(e, bb, batch_ofs);
job = xe_bb_create_wa_job(q, bb, batch_ofs);
if (IS_ERR(job)) {
xe_bb_free(bb, NULL);
return PTR_ERR(job);
......@@ -173,7 +173,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
int err = 0;
for_each_hw_engine(hwe, gt, id) {
struct xe_engine *e, *nop_e;
struct xe_exec_queue *q, *nop_q;
struct xe_vm *vm;
void *default_lrc;
......@@ -192,58 +192,58 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
return -ENOMEM;
vm = xe_migrate_get_vm(tile->migrate);
e = xe_engine_create(xe, vm, BIT(hwe->logical_instance), 1,
hwe, ENGINE_FLAG_WA);
if (IS_ERR(e)) {
err = PTR_ERR(e);
xe_gt_err(gt, "hwe %s: xe_engine_create failed (%pe)\n",
hwe->name, e);
q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1,
hwe, EXEC_QUEUE_FLAG_WA);
if (IS_ERR(q)) {
err = PTR_ERR(q);
xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
hwe->name, q);
goto put_vm;
}
/* Prime golden LRC with known good state */
err = emit_wa_job(gt, e);
err = emit_wa_job(gt, q);
if (err) {
xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
hwe->name, ERR_PTR(err), e->guc->id);
goto put_engine;
hwe->name, ERR_PTR(err), q->guc->id);
goto put_exec_queue;
}
nop_e = xe_engine_create(xe, vm, BIT(hwe->logical_instance),
1, hwe, ENGINE_FLAG_WA);
if (IS_ERR(nop_e)) {
err = PTR_ERR(nop_e);
xe_gt_err(gt, "hwe %s: nop xe_engine_create failed (%pe)\n",
hwe->name, nop_e);
goto put_engine;
nop_q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance),
1, hwe, EXEC_QUEUE_FLAG_WA);
if (IS_ERR(nop_q)) {
err = PTR_ERR(nop_q);
xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
hwe->name, nop_q);
goto put_exec_queue;
}
/* Switch to different LRC */
err = emit_nop_job(gt, nop_e);
err = emit_nop_job(gt, nop_q);
if (err) {
xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
hwe->name, ERR_PTR(err), nop_e->guc->id);
goto put_nop_e;
hwe->name, ERR_PTR(err), nop_q->guc->id);
goto put_nop_q;
}
/* Reload golden LRC to record the effect of any indirect W/A */
err = emit_nop_job(gt, e);
err = emit_nop_job(gt, q);
if (err) {
xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
hwe->name, ERR_PTR(err), e->guc->id);
goto put_nop_e;
hwe->name, ERR_PTR(err), q->guc->id);
goto put_nop_q;
}
xe_map_memcpy_from(xe, default_lrc,
&e->lrc[0].bo->vmap,
xe_lrc_pphwsp_offset(&e->lrc[0]),
&q->lrc[0].bo->vmap,
xe_lrc_pphwsp_offset(&q->lrc[0]),
xe_lrc_size(xe, hwe->class));
gt->default_lrc[hwe->class] = default_lrc;
put_nop_e:
xe_engine_put(nop_e);
put_engine:
xe_engine_put(e);
put_nop_q:
xe_exec_queue_put(nop_q);
put_exec_queue:
xe_exec_queue_put(q);
put_vm:
xe_vm_put(vm);
if (err)
......
......@@ -14,7 +14,7 @@
#include "xe_sa_types.h"
#include "xe_uc_types.h"
struct xe_engine_ops;
struct xe_exec_queue_ops;
struct xe_migrate;
struct xe_ring_ops;
......@@ -269,8 +269,8 @@ struct xe_gt {
/** @gtidle: idle properties of GT */
struct xe_gt_idle gtidle;
/** @engine_ops: submission backend engine operations */
const struct xe_engine_ops *engine_ops;
/** @exec_queue_ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *exec_queue_ops;
/**
* @ring_ops: ring operations for this hw engine (1 per engine class)
......
......@@ -495,7 +495,7 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
u8 gc;
/*
* 1. Write all MMIO entries for this engine to the table. No
* 1. Write all MMIO entries for this exec queue to the table. No
* need to worry about fused-off engines and when there are
* entries in the regset: the reg_state_list has been zero'ed
* by xe_guc_ads_populate()
......
......@@ -888,11 +888,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
break;
case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
ret = xe_guc_engine_reset_handler(guc, payload, adj_len);
ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
break;
case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
ret = xe_guc_engine_reset_failure_handler(guc, payload,
adj_len);
ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
adj_len);
break;
case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
/* Selftest only at the moment */
......@@ -902,8 +902,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
/* FIXME: Handle this */
break;
case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
ret = xe_guc_engine_memory_cat_error_handler(guc, payload,
adj_len);
ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
adj_len);
break;
case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
ret = xe_guc_pagefault_handler(guc, payload, adj_len);
......
......@@ -12,22 +12,22 @@
#include "xe_gpu_scheduler_types.h"
struct dma_fence;
struct xe_engine;
struct xe_exec_queue;
/**
* struct xe_guc_engine - GuC specific state for an xe_engine
* struct xe_guc_exec_queue - GuC specific state for an xe_exec_queue
*/
struct xe_guc_engine {
/** @engine: Backpointer to parent xe_engine */
struct xe_engine *engine;
/** @sched: GPU scheduler for this xe_engine */
struct xe_guc_exec_queue {
/** @q: Backpointer to parent xe_exec_queue */
struct xe_exec_queue *q;
/** @sched: GPU scheduler for this xe_exec_queue */
struct xe_gpu_scheduler sched;
/** @entity: Scheduler entity for this xe_engine */
/** @entity: Scheduler entity for this xe_exec_queue */
struct xe_sched_entity entity;
/**
* @static_msgs: Static messages for this xe_engine, used when a message
* needs to sent through the GPU scheduler but memory allocations are
* not allowed.
* @static_msgs: Static messages for this xe_exec_queue, used when
* a message needs to sent through the GPU scheduler but memory
* allocations are not allowed.
*/
#define MAX_STATIC_MSG_TYPE 3
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
......@@ -37,17 +37,17 @@ struct xe_guc_engine {
struct work_struct fini_async;
/** @resume_time: time of last resume */
u64 resume_time;
/** @state: GuC specific state for this xe_engine */
/** @state: GuC specific state for this xe_exec_queue */
atomic_t state;
/** @wqi_head: work queue item tail */
u32 wqi_head;
/** @wqi_tail: work queue item tail */
u32 wqi_tail;
/** @id: GuC id for this xe_engine */
/** @id: GuC id for this exec_queue */
u16 id;
/** @suspend_wait: wait queue used to wait on pending suspends */
wait_queue_head_t suspend_wait;
/** @suspend_pending: a suspend of the engine is pending */
/** @suspend_pending: a suspend of the exec_queue is pending */
bool suspend_pending;
};
......
......@@ -69,13 +69,13 @@ struct guc_klv_generic_dw_t {
} __packed;
/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
struct guc_update_engine_policy_header {
struct guc_update_exec_queue_policy_header {
u32 action;
u32 guc_id;
} __packed;
struct guc_update_engine_policy {
struct guc_update_engine_policy_header header;
struct guc_update_exec_queue_policy {
struct guc_update_exec_queue_policy_header header;
struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
} __packed;
......
......@@ -9,7 +9,7 @@
#include <linux/types.h>
struct drm_printer;
struct xe_engine;
struct xe_exec_queue;
struct xe_guc;
int xe_guc_submit_init(struct xe_guc *guc);
......@@ -21,18 +21,18 @@ int xe_guc_submit_start(struct xe_guc *guc);
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_engine_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_engine_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len);
int xe_guc_engine_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len);
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
struct xe_guc_submit_engine_snapshot *
xe_guc_engine_snapshot_capture(struct xe_engine *e);
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
void
xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
struct drm_printer *p);
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
struct drm_printer *p);
void
xe_guc_engine_snapshot_free(struct xe_guc_submit_engine_snapshot *snapshot);
xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
#endif
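For reference, the renamed snapshot helpers above are used as a capture/print/free triple, exactly as the devcoredump code earlier in this diff does; p is assumed to be an initialized struct drm_printer.

struct xe_guc_submit_exec_queue_snapshot *snapshot;

snapshot = xe_guc_exec_queue_snapshot_capture(q);
xe_guc_exec_queue_snapshot_print(snapshot, &p);
xe_guc_exec_queue_snapshot_free(snapshot);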
......@@ -79,20 +79,20 @@ struct pending_list_snapshot {
};
/**
* struct xe_guc_submit_engine_snapshot - Snapshot for devcoredump
* struct xe_guc_submit_exec_queue_snapshot - Snapshot for devcoredump
*/
struct xe_guc_submit_engine_snapshot {
/** @name: name of this engine */
struct xe_guc_submit_exec_queue_snapshot {
/** @name: name of this exec queue */
char name[MAX_FENCE_NAME_LEN];
/** @class: class of this engine */
/** @class: class of this exec queue */
enum xe_engine_class class;
/**
* @logical_mask: logical mask of where job submitted to engine can run
* @logical_mask: logical mask of where job submitted to exec queue can run
*/
u32 logical_mask;
/** @width: width (number BB submitted per exec) of this engine */
/** @width: width (number BB submitted per exec) of this exec queue */
u16 width;
/** @refcount: ref count of this engine */
/** @refcount: ref count of this exec queue */
u32 refcount;
/**
* @sched_timeout: the time after which a job is removed from the
......@@ -113,8 +113,8 @@ struct xe_guc_submit_engine_snapshot {
/** @schedule_state: Schedule State at the moment of Crash */
u32 schedule_state;
/** @engine_flags: Flags of the faulty engine */
unsigned long engine_flags;
/** @exec_queue_flags: Flags of the faulty exec_queue */
unsigned long exec_queue_flags;
/** @guc: GuC Engine Snapshot */
struct {
......@@ -122,7 +122,7 @@ struct xe_guc_submit_engine_snapshot {
u32 wqi_head;
/** @wqi_tail: work queue item tail */
u32 wqi_tail;
/** @id: GuC id for this xe_engine */
/** @id: GuC id for this exec_queue */
u16 id;
} guc;
......
......@@ -33,8 +33,8 @@ struct xe_guc {
struct xe_guc_pc pc;
/** @submission_state: GuC submission state */
struct {
/** @engine_lookup: Lookup an xe_engine from guc_id */
struct xarray engine_lookup;
/** @exec_queue_lookup: Lookup an xe_engine from guc_id */
struct xarray exec_queue_lookup;
/** @guc_ids: used to allocate new guc_ids, single-lrc */
struct ida guc_ids;
/** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
......
......@@ -12,7 +12,7 @@
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_engine_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
#include "xe_map.h"
......@@ -604,7 +604,7 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
#define ACC_NOTIFY_S 16
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_engine *e, struct xe_vm *vm, u32 ring_size)
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
{
struct xe_gt *gt = hwe->gt;
struct xe_tile *tile = gt_to_tile(gt);
......@@ -669,12 +669,12 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
if (xe->info.has_asid && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
(e->usm.acc_granularity <<
(q->usm.acc_granularity <<
ACC_GRANULARITY_S) | vm->usm.asid);
if (xe->info.supports_usm && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
(e->usm.acc_notify << ACC_NOTIFY_S) |
e->usm.acc_trigger);
(q->usm.acc_notify << ACC_NOTIFY_S) |
q->usm.acc_trigger);
lrc->desc = GEN8_CTX_VALID;
lrc->desc |= INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;
......
......@@ -8,7 +8,7 @@
#include "xe_lrc_types.h"
struct xe_device;
struct xe_engine;
struct xe_exec_queue;
enum xe_engine_class;
struct xe_hw_engine;
struct xe_vm;
......@@ -16,7 +16,7 @@ struct xe_vm;
#define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_engine *e, struct xe_vm *vm, u32 ring_size);
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size);
void xe_lrc_finish(struct xe_lrc *lrc);
size_t xe_lrc_size(struct xe_device *xe, enum xe_engine_class class);
......
......@@ -34,8 +34,8 @@
* struct xe_migrate - migrate context.
*/
struct xe_migrate {
/** @eng: Default engine used for migration */
struct xe_engine *eng;
/** @q: Default exec queue used for migration */
struct xe_exec_queue *q;
/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
struct xe_tile *tile;
/** @job_mutex: Timeline mutex for @eng. */
......@@ -78,9 +78,9 @@ struct xe_migrate {
*
* Return: The default migrate engine
*/
struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile)
struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
{
return tile->migrate->eng;
return tile->migrate->q;
}
static void xe_migrate_fini(struct drm_device *dev, void *arg)
......@@ -88,11 +88,11 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
struct xe_migrate *m = arg;
struct ww_acquire_ctx ww;
xe_vm_lock(m->eng->vm, &ww, 0, false);
xe_vm_lock(m->q->vm, &ww, 0, false);
xe_bo_unpin(m->pt_bo);
if (m->cleared_bo)
xe_bo_unpin(m->cleared_bo);
xe_vm_unlock(m->eng->vm, &ww);
xe_vm_unlock(m->q->vm, &ww);
dma_fence_put(m->fence);
if (m->cleared_bo)
......@@ -100,8 +100,8 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
xe_bo_put(m->pt_bo);
drm_suballoc_manager_fini(&m->vm_update_sa);
mutex_destroy(&m->job_mutex);
xe_vm_close_and_put(m->eng->vm);
xe_engine_put(m->eng);
xe_vm_close_and_put(m->q->vm);
xe_exec_queue_put(m->q);
}
static u64 xe_migrate_vm_addr(u64 slot, u32 level)
......@@ -341,20 +341,20 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
if (!hwe)
return ERR_PTR(-EINVAL);
m->eng = xe_engine_create(xe, vm,
BIT(hwe->logical_instance), 1,
hwe, ENGINE_FLAG_KERNEL);
m->q = xe_exec_queue_create(xe, vm,
BIT(hwe->logical_instance), 1,
hwe, EXEC_QUEUE_FLAG_KERNEL);
} else {
m->eng = xe_engine_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
ENGINE_FLAG_KERNEL);
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
EXEC_QUEUE_FLAG_KERNEL);
}
if (IS_ERR(m->eng)) {
if (IS_ERR(m->q)) {
xe_vm_close_and_put(vm);
return ERR_CAST(m->eng);
return ERR_CAST(m->q);
}
if (xe->info.supports_usm)
m->eng->priority = XE_ENGINE_PRIORITY_KERNEL;
m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
mutex_init(&m->job_mutex);
......@@ -456,7 +456,7 @@ static void emit_pte(struct xe_migrate *m,
addr = xe_res_dma(cur) & PAGE_MASK;
if (is_vram) {
/* Is this a 64K PTE entry? */
if ((m->eng->vm->flags & XE_VM_FLAG_64K) &&
if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
!(cur_ofs & (16 * 8 - 1))) {
XE_WARN_ON(!IS_ALIGNED(addr, SZ_64K));
addr |= XE_PTE_PS64;
......@@ -714,7 +714,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
src_L0, ccs_ofs, copy_ccs);
mutex_lock(&m->job_mutex);
job = xe_bb_create_migration_job(m->eng, bb,
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
......@@ -938,7 +938,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
}
mutex_lock(&m->job_mutex);
job = xe_bb_create_migration_job(m->eng, bb,
job = xe_bb_create_migration_job(m->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
......@@ -1024,7 +1024,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
{
return xe_vm_get(m->eng->vm);
return xe_vm_get(m->q->vm);
}
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
......@@ -1106,7 +1106,7 @@ static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
* @m: The migrate context.
* @vm: The vm we'll be updating.
* @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
* @eng: The engine to be used for the update or NULL if the default
* @q: The exec queue to be used for the update or NULL if the default
* migration engine is to be used.
* @updates: An array of update descriptors.
* @num_updates: Number of descriptors in @updates.
......@@ -1132,7 +1132,7 @@ struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_vm *vm,
struct xe_bo *bo,
struct xe_engine *eng,
struct xe_exec_queue *q,
const struct xe_vm_pgtable_update *updates,
u32 num_updates,
struct xe_sync_entry *syncs, u32 num_syncs,
......@@ -1150,13 +1150,13 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
u64 addr;
int err = 0;
bool usm = !eng && xe->info.supports_usm;
bool usm = !q && xe->info.supports_usm;
bool first_munmap_rebind = vma &&
vma->gpuva.flags & XE_VMA_FIRST_REBIND;
struct xe_engine *eng_override = !eng ? m->eng : eng;
struct xe_exec_queue *q_override = !q ? m->q : q;
/* Use the CPU if no in syncs and engine is idle */
if (no_in_syncs(syncs, num_syncs) && xe_engine_is_idle(eng_override)) {
if (no_in_syncs(syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
num_updates,
first_munmap_rebind,
......@@ -1186,14 +1186,14 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
*/
XE_WARN_ON(batch_size >= SZ_128K);
bb = xe_bb_new(gt, batch_size, !eng && xe->info.supports_usm);
bb = xe_bb_new(gt, batch_size, !q && xe->info.supports_usm);
if (IS_ERR(bb))
return ERR_CAST(bb);
/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
ppgtt_ofs = NUM_KERNEL_PDE - 1;
if (eng) {
if (q) {
XE_WARN_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT);
sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
......@@ -1249,10 +1249,10 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
write_pgtable(tile, bb, 0, &updates[i], pt_update);
}
if (!eng)
if (!q)
mutex_lock(&m->job_mutex);
job = xe_bb_create_migration_job(eng ?: m->eng, bb,
job = xe_bb_create_migration_job(q ?: m->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
......@@ -1295,7 +1295,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
if (!eng)
if (!q)
mutex_unlock(&m->job_mutex);
xe_bb_free(bb, fence);
......@@ -1306,7 +1306,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
err_job:
xe_sched_job_put(job);
err_bb:
if (!eng)
if (!q)
mutex_unlock(&m->job_mutex);
xe_bb_free(bb, NULL);
err:
......
......@@ -14,7 +14,7 @@ struct ttm_resource;
struct xe_bo;
struct xe_gt;
struct xe_engine;
struct xe_exec_queue;
struct xe_migrate;
struct xe_migrate_pt_update;
struct xe_sync_entry;
......@@ -97,7 +97,7 @@ struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_vm *vm,
struct xe_bo *bo,
struct xe_engine *eng,
struct xe_exec_queue *q,
const struct xe_vm_pgtable_update *updates,
u32 num_updates,
struct xe_sync_entry *syncs, u32 num_syncs,
......@@ -105,5 +105,5 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
void xe_migrate_wait(struct xe_migrate *m);
struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile);
struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
#endif
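As the xe_migrate.c hunks above show, passing NULL for @q makes xe_migrate_update_pgtables() fall back to the tile's default migration queue (m->q) and take m->job_mutex internally. A hedged call-site sketch, with the updates array, syncs and pt_update callbacks assumed to be prepared elsewhere:

struct dma_fence *fence;

fence = xe_migrate_update_pgtables(m, vm, bo, NULL /* default migrate queue */,
				   updates, num_updates,
				   syncs, num_syncs, &pt_update);
if (IS_ERR(fence))
	return PTR_ERR(fence);
dma_fence_put(fence);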
......@@ -8,7 +8,7 @@
#include <linux/types.h>
struct xe_engine;
struct xe_exec_queue;
struct xe_gt;
void xe_mocs_init_early(struct xe_gt *gt);
......
......@@ -15,19 +15,19 @@ static void preempt_fence_work_func(struct work_struct *w)
bool cookie = dma_fence_begin_signalling();
struct xe_preempt_fence *pfence =
container_of(w, typeof(*pfence), preempt_work);
struct xe_engine *e = pfence->engine;
struct xe_exec_queue *q = pfence->q;
if (pfence->error)
dma_fence_set_error(&pfence->base, pfence->error);
else
e->ops->suspend_wait(e);
q->ops->suspend_wait(q);
dma_fence_signal(&pfence->base);
dma_fence_end_signalling(cookie);
xe_vm_queue_rebind_worker(e->vm);
xe_vm_queue_rebind_worker(q->vm);
xe_engine_put(e);
xe_exec_queue_put(q);
}
static const char *
......@@ -46,9 +46,9 @@ static bool preempt_fence_enable_signaling(struct dma_fence *fence)
{
struct xe_preempt_fence *pfence =
container_of(fence, typeof(*pfence), base);
struct xe_engine *e = pfence->engine;
struct xe_exec_queue *q = pfence->q;
pfence->error = e->ops->suspend(e);
pfence->error = q->ops->suspend(q);
queue_work(system_unbound_wq, &pfence->preempt_work);
return true;
}
......@@ -104,43 +104,43 @@ void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
* xe_preempt_fence_alloc().
* @pfence: The struct xe_preempt_fence pointer returned from
* xe_preempt_fence_alloc().
* @e: The struct xe_engine used for arming.
* @q: The struct xe_exec_queue used for arming.
* @context: The dma-fence context used for arming.
* @seqno: The dma-fence seqno used for arming.
*
* Inserts the preempt fence into @context's timeline, takes @link off any
* list, and registers the struct xe_engine as the xe_engine to be preempted.
* list, and registers the struct xe_exec_queue as the xe_engine to be preempted.
*
* Return: A pointer to a struct dma_fence embedded into the preempt fence.
* This function doesn't error.
*/
struct dma_fence *
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_engine *e,
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
u64 context, u32 seqno)
{
list_del_init(&pfence->link);
pfence->engine = xe_engine_get(e);
pfence->q = xe_exec_queue_get(q);
dma_fence_init(&pfence->base, &preempt_fence_ops,
&e->compute.lock, context, seqno);
&q->compute.lock, context, seqno);
return &pfence->base;
}
/**
* xe_preempt_fence_create() - Helper to create and arm a preempt fence.
* @e: The struct xe_engine used for arming.
* @q: The struct xe_exec_queue used for arming.
* @context: The dma-fence context used for arming.
* @seqno: The dma-fence seqno used for arming.
*
* Allocates and inserts the preempt fence into @context's timeline,
* and registers @e as the struct xe_engine to be preempted.
* and registers @e as the struct xe_exec_queue to be preempted.
*
* Return: A pointer to the resulting struct dma_fence on success. An error
* pointer on error. In particular if allocation fails it returns
* ERR_PTR(-ENOMEM);
*/
struct dma_fence *
xe_preempt_fence_create(struct xe_engine *e,
xe_preempt_fence_create(struct xe_exec_queue *q,
u64 context, u32 seqno)
{
struct xe_preempt_fence *pfence;
......@@ -149,7 +149,7 @@ xe_preempt_fence_create(struct xe_engine *e,
if (IS_ERR(pfence))
return ERR_CAST(pfence);
return xe_preempt_fence_arm(pfence, e, context, seqno);
return xe_preempt_fence_arm(pfence, q, context, seqno);
}
bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
......
......@@ -11,7 +11,7 @@
struct list_head;
struct dma_fence *
xe_preempt_fence_create(struct xe_engine *e,
xe_preempt_fence_create(struct xe_exec_queue *q,
u64 context, u32 seqno);
struct xe_preempt_fence *xe_preempt_fence_alloc(void);
......@@ -19,7 +19,7 @@ struct xe_preempt_fence *xe_preempt_fence_alloc(void);
void xe_preempt_fence_free(struct xe_preempt_fence *pfence);
struct dma_fence *
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_engine *e,
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
u64 context, u32 seqno);
static inline struct xe_preempt_fence *
......
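A small sketch of the renamed preemption-fence API above: taking the context and seqno arguments from the queue's compute state (shown in the xe_exec_queue_types.h hunk) is an assumption about the call site, and error handling is trimmed.

struct dma_fence *fence;

fence = xe_preempt_fence_create(q, q->compute.context, ++q->compute.seqno);
if (IS_ERR(fence))
	return PTR_ERR(fence);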
......@@ -203,7 +203,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
config->info[XE_QUERY_CONFIG_MEM_REGION_COUNT] =
hweight_long(xe->info.mem_region_mask);
config->info[XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY] =
xe_engine_device_get_max_priority(xe);
xe_exec_queue_device_get_max_priority(xe);
if (copy_to_user(query_ptr, config, size)) {
kfree(config);
......