Commit bb977d37 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: abstract amdgpu_job for scheduler

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 6055f37a
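
What the patch does: the per-submission scheduler state that used to live in struct amdgpu_cs_parser (job_lock, the deferred free via job_work, free_job, the scheduler fence) moves into a new struct amdgpu_job that embeds struct amd_sched_job as its first member. The scheduler hands its generic job pointer back to the driver, which simply casts it to the containing amdgpu_job, so the old void *data field and the extra kzalloc() inside amd_sched_push_job() can go away. Below is a minimal, self-contained sketch of that embedding-and-cast pattern in plain user-space C; the type and function names are made up for illustration and are not the kernel code itself.

    #include <stdio.h>

    /* Generic job as the scheduler sees it (stand-in for struct amd_sched_job). */
    struct sched_job {
            int id;
    };

    /* Driver-side job embeds the generic job as its FIRST member (stand-in for
     * struct amdgpu_job). Because the first member shares the address of the
     * containing struct, a plain cast recovers the driver data. */
    struct driver_job {
            struct sched_job base;
            const char *payload;
    };

    /* Backend callback only ever receives the generic pointer. */
    static void run_job(struct sched_job *job)
    {
            struct driver_job *djob = (struct driver_job *)job;
            printf("job %d: %s\n", djob->base.id, djob->payload);
    }

    int main(void)
    {
            struct driver_job job = { .base = { .id = 1 }, .payload = "hello" };

            /* The scheduler is handed &job.base, nothing else. */
            run_job(&job.base);
            return 0;
    }

With the base embedded, a submitter fills in job->base.sched and job->base.s_entity and pushes the job directly with amd_sched_push_job((struct amd_sched_job *)job), as the hunks below show.
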
@@ -183,6 +183,7 @@ struct amdgpu_vm;
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
@@ -871,7 +872,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
-                                        int (*free_job)(struct amdgpu_cs_parser *),
+                                        int (*free_job)(struct amdgpu_job *),
                                         void *owner,
                                         struct fence **fence);
@@ -1040,6 +1041,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx);
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                               struct fence *fence, uint64_t queued_seq);
@@ -1265,6 +1267,18 @@ struct amdgpu_cs_parser {
         struct amd_sched_fence *s_fence;
 };
+struct amdgpu_job {
+        struct amd_sched_job    base;
+        struct amdgpu_device    *adev;
+        struct amdgpu_ctx       *ctx;
+        struct drm_file         *owner;
+        struct amdgpu_ib        *ibs;
+        uint32_t                num_ibs;
+        struct mutex            job_lock;
+        struct amdgpu_user_fence uf;
+        int (*free_job)(struct amdgpu_job *sched_job);
+};
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
         return p->ibs[ib_idx].ptr[idx];
...
@@ -126,19 +126,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
         return 0;
 }
-static void amdgpu_job_work_func(struct work_struct *work)
-{
-        struct amdgpu_cs_parser *sched_job =
-                container_of(work, struct amdgpu_cs_parser,
-                             job_work);
-        mutex_lock(&sched_job->job_lock);
-        if (sched_job->free_job)
-                sched_job->free_job(sched_job);
-        mutex_unlock(&sched_job->job_lock);
-        /* after processing job, free memory */
-        fence_put(&sched_job->s_fence->base);
-        kfree(sched_job);
-}
 struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
                                                  struct drm_file *filp,
                                                  struct amdgpu_ctx *ctx,
@@ -157,10 +144,6 @@ struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
         parser->ctx = ctx;
         parser->ibs = ibs;
         parser->num_ibs = num_ibs;
-        if (amdgpu_enable_scheduler) {
-                mutex_init(&parser->job_lock);
-                INIT_WORK(&parser->job_work, amdgpu_job_work_func);
-        }
         for (i = 0; i < num_ibs; i++)
                 ibs[i].ctx = ctx;
@@ -508,15 +491,17 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
         for (i = 0; i < parser->nchunks; i++)
                 drm_free_large(parser->chunks[i].kdata);
         kfree(parser->chunks);
-        if (parser->ibs)
-                for (i = 0; i < parser->num_ibs; i++)
-                        amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-        kfree(parser->ibs);
-        if (parser->uf.bo)
-                drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
         if (!amdgpu_enable_scheduler)
-                kfree(parser);
+        {
+                if (parser->ibs)
+                        for (i = 0; i < parser->num_ibs; i++)
+                                amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+                kfree(parser->ibs);
+                if (parser->uf.bo)
+                        drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+        }
+        kfree(parser);
 }
 /**
@@ -533,12 +518,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
         amdgpu_cs_parser_fini_late(parser);
 }
-static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
-{
-        amdgpu_cs_parser_fini_late(sched_job);
-        return 0;
-}
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
                                    struct amdgpu_vm *vm)
 {
@@ -874,6 +853,19 @@ static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
         return ring;
 }
+static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+{
+        int i;
+        amdgpu_ctx_put(sched_job->ctx);
+        if (sched_job->ibs)
+                for (i = 0; i < sched_job->num_ibs; i++)
+                        amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
+        kfree(sched_job->ibs);
+        if (sched_job->uf.bo)
+                drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+        return 0;
+}
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
         struct amdgpu_device *adev = dev->dev_private;
@@ -900,33 +892,50 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         }
         if (amdgpu_enable_scheduler && parser->num_ibs) {
+                struct amdgpu_job *job;
                 struct amdgpu_ring * ring =
                         amdgpu_cs_parser_get_ring(adev, parser);
                 r = amdgpu_cs_parser_prepare_job(parser);
                 if (r)
                         goto out;
-                parser->ring = ring;
-                parser->free_job = amdgpu_cs_parser_free_job;
-                mutex_lock(&parser->job_lock);
-                r = amd_sched_push_job(ring->scheduler,
-                                       &parser->ctx->rings[ring->idx].entity,
-                                       parser,
-                                       &parser->s_fence);
+                job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+                if (!job)
+                        return -ENOMEM;
+                job->base.sched = ring->scheduler;
+                job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+                job->adev = parser->adev;
+                job->ibs = parser->ibs;
+                job->num_ibs = parser->num_ibs;
+                job->owner = parser->filp;
+                job->ctx = amdgpu_ctx_get_ref(parser->ctx);
+                mutex_init(&job->job_lock);
+                if (job->ibs[job->num_ibs - 1].user) {
+                        memcpy(&job->uf, &parser->uf,
+                               sizeof(struct amdgpu_user_fence));
+                        job->ibs[job->num_ibs - 1].user = &job->uf;
+                }
+                job->free_job = amdgpu_cs_free_job;
+                mutex_lock(&job->job_lock);
+                r = amd_sched_push_job((struct amd_sched_job *)job);
                 if (r) {
-                        mutex_unlock(&parser->job_lock);
+                        mutex_unlock(&job->job_lock);
+                        amdgpu_cs_free_job(job);
+                        kfree(job);
                         goto out;
                 }
-                parser->ibs[parser->num_ibs - 1].sequence =
-                        amdgpu_ctx_add_fence(parser->ctx, ring,
-                                             &parser->s_fence->base,
-                                             parser->s_fence->v_seq);
-                cs->out.handle = parser->s_fence->v_seq;
+                job->ibs[parser->num_ibs - 1].sequence =
+                        amdgpu_ctx_add_fence(job->ctx, ring,
                                             &job->base.s_fence->base,
                                             job->base.s_fence->v_seq);
+                cs->out.handle = job->base.s_fence->v_seq;
                 list_sort(NULL, &parser->validated, cmp_size_smaller_first);
                 ttm_eu_fence_buffer_objects(&parser->ticket,
                                 &parser->validated,
-                                &parser->s_fence->base);
-                mutex_unlock(&parser->job_lock);
+                                &job->base.s_fence->base);
+                mutex_unlock(&job->job_lock);
+                amdgpu_cs_parser_fini_late(parser);
                 up_read(&adev->exclusive_lock);
                 return 0;
         }
...
@@ -219,6 +219,13 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
         return ctx;
 }
+struct amdgpu_ctx *amdgpu_ctx_get_ref(struct amdgpu_ctx *ctx)
+{
+        if (ctx)
+                kref_get(&ctx->refcount);
+        return ctx;
+}
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
         if (ctx == NULL)
...
@@ -27,81 +27,58 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
-static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
-                                    struct amd_sched_entity *entity,
-                                    struct amd_sched_job *job)
-{
-        int r = 0;
-        struct amdgpu_cs_parser *sched_job;
-        if (!job || !job->data) {
-                DRM_ERROR("job is null\n");
-                return -EINVAL;
-        }
-        sched_job = (struct amdgpu_cs_parser *)job->data;
-        if (sched_job->prepare_job) {
-                r = sched_job->prepare_job(sched_job);
-                if (r) {
-                        DRM_ERROR("Prepare job error\n");
-                        schedule_work(&sched_job->job_work);
-                }
-        }
-        return r;
-}
 static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                                           struct amd_sched_entity *entity,
                                           struct amd_sched_job *job)
 {
         int r = 0;
-        struct amdgpu_cs_parser *sched_job;
+        struct amdgpu_job *sched_job;
         struct amdgpu_fence *fence;
-        if (!job || !job->data) {
+        if (!job) {
                 DRM_ERROR("job is null\n");
                 return NULL;
         }
-        sched_job = (struct amdgpu_cs_parser *)job->data;
+        sched_job = (struct amdgpu_job *)job;
         mutex_lock(&sched_job->job_lock);
         r = amdgpu_ib_schedule(sched_job->adev,
                                sched_job->num_ibs,
                                sched_job->ibs,
-                               sched_job->filp);
+                               sched_job->owner);
         if (r)
                 goto err;
         fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
-        if (sched_job->run_job) {
-                r = sched_job->run_job(sched_job);
-                if (r)
-                        goto err;
-        }
         mutex_unlock(&sched_job->job_lock);
         return &fence->base;
 err:
         DRM_ERROR("Run job error\n");
         mutex_unlock(&sched_job->job_lock);
-        schedule_work(&sched_job->job_work);
+        sched->ops->process_job(sched, (struct amd_sched_job *)sched_job);
         return NULL;
 }
 static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
                                      struct amd_sched_job *job)
 {
-        struct amdgpu_cs_parser *sched_job;
-        if (!job || !job->data) {
+        struct amdgpu_job *sched_job;
+        if (!job) {
                 DRM_ERROR("job is null\n");
                 return;
         }
-        sched_job = (struct amdgpu_cs_parser *)job->data;
-        schedule_work(&sched_job->job_work);
+        sched_job = (struct amdgpu_job *)job;
+        mutex_lock(&sched_job->job_lock);
+        if (sched_job->free_job)
+                sched_job->free_job(sched_job);
+        mutex_unlock(&sched_job->job_lock);
+        /* after processing job, free memory */
+        fence_put(&sched_job->base.s_fence->base);
+        kfree(sched_job);
 }
 struct amd_sched_backend_ops amdgpu_sched_ops = {
-        .prepare_job = amdgpu_sched_prepare_job,
         .run_job = amdgpu_sched_run_job,
         .process_job = amdgpu_sched_process_job
 };
@@ -110,31 +87,34 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                          struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ibs,
                                          unsigned num_ibs,
-                                         int (*free_job)(struct amdgpu_cs_parser *),
+                                         int (*free_job)(struct amdgpu_job *),
                                          void *owner,
                                          struct fence **f)
 {
         int r = 0;
         if (amdgpu_enable_scheduler) {
-                struct amdgpu_cs_parser *sched_job =
-                        amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
-                                                ibs, num_ibs);
-                if(!sched_job) {
+                struct amdgpu_job *job =
+                        kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+                if (!job)
                         return -ENOMEM;
-                }
-                sched_job->free_job = free_job;
-                mutex_lock(&sched_job->job_lock);
-                r = amd_sched_push_job(ring->scheduler,
-                                       &adev->kernel_ctx.rings[ring->idx].entity,
-                                       sched_job, &sched_job->s_fence);
+                job->base.sched = ring->scheduler;
+                job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+                job->adev = adev;
+                job->ibs = ibs;
+                job->num_ibs = num_ibs;
+                job->owner = owner;
+                mutex_init(&job->job_lock);
+                job->free_job = free_job;
+                mutex_lock(&job->job_lock);
+                r = amd_sched_push_job((struct amd_sched_job *)job);
                 if (r) {
-                        mutex_unlock(&sched_job->job_lock);
-                        kfree(sched_job);
+                        mutex_unlock(&job->job_lock);
+                        kfree(job);
                         return r;
                 }
-                ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
-                *f = fence_get(&sched_job->s_fence->base);
-                mutex_unlock(&sched_job->job_lock);
+                ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
+                *f = fence_get(&job->base.s_fence->base);
+                mutex_unlock(&job->job_lock);
         } else {
                 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                 if (r)
...
@@ -807,7 +807,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 static int amdgpu_uvd_free_job(
-        struct amdgpu_cs_parser *sched_job)
+        struct amdgpu_job *sched_job)
 {
         amdgpu_ib_free(sched_job->adev, sched_job->ibs);
         kfree(sched_job->ibs);
...
@@ -340,7 +340,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 }
 static int amdgpu_vce_free_job(
-        struct amdgpu_cs_parser *sched_job)
+        struct amdgpu_job *sched_job)
 {
         amdgpu_ib_free(sched_job->adev, sched_job->ibs);
         kfree(sched_job->ibs);
...
@@ -307,7 +307,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 }
 static int amdgpu_vm_free_job(
-        struct amdgpu_cs_parser *sched_job)
+        struct amdgpu_job *sched_job)
 {
         int i;
         for (i = 0; i < sched_job->num_ibs; i++)
...
@@ -282,30 +282,18 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
  * scheduler consum some queued command.
  * -1 other fail.
  */
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
-                       struct amd_sched_entity *c_entity,
-                       void *data,
-                       struct amd_sched_fence **fence)
+int amd_sched_push_job(struct amd_sched_job *sched_job)
 {
-        struct amd_sched_job *job;
+        struct amd_sched_fence *fence =
+                amd_sched_fence_create(sched_job->s_entity);
         if (!fence)
                 return -EINVAL;
-        job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
-        if (!job)
-                return -ENOMEM;
-        job->sched = sched;
-        job->s_entity = c_entity;
-        job->data = data;
-        *fence = amd_sched_fence_create(c_entity);
-        if ((*fence) == NULL) {
-                kfree(job);
-                return -EINVAL;
-        }
-        fence_get(&(*fence)->base);
-        job->s_fence = *fence;
-        while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
-                                   &c_entity->queue_lock) != sizeof(void *)) {
+        fence_get(&fence->base);
+        sched_job->s_fence = fence;
+        while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
+                                   &sched_job, sizeof(void *),
+                                   &sched_job->s_entity->queue_lock) !=
+               sizeof(void *)) {
                 /**
                  * Current context used up all its IB slots
                  * wait here, or need to check whether GPU is hung
@@ -313,8 +301,8 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                 schedule();
         }
         /* first job wake up scheduler */
-        if ((kfifo_len(&c_entity->job_queue) / sizeof(void *)) == 1)
-                wake_up_interruptible(&sched->wait_queue);
+        if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
+                wake_up_interruptible(&sched_job->sched->wait_queue);
         return 0;
 }
@@ -333,10 +321,8 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
         list_del(&sched_job->list);
         atomic64_dec(&sched->hw_rq_count);
         spin_unlock_irqrestore(&sched->queue_lock, flags);
-        sched->ops->process_job(sched, sched_job);
         fence_put(&sched_job->s_fence->base);
-        kfree(sched_job);
+        sched->ops->process_job(sched, sched_job);
         wake_up_interruptible(&sched->wait_queue);
 }
@@ -359,7 +345,9 @@ static int amd_sched_main(void *param)
                 r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
                 if (r != sizeof(void *))
                         continue;
-                r = sched->ops->prepare_job(sched, c_entity, job);
+                r = 0;
+                if (sched->ops->prepare_job)
+                        r = sched->ops->prepare_job(sched, c_entity, job);
                 if (!r) {
                         unsigned long flags;
                         spin_lock_irqsave(&sched->queue_lock, flags);
...
@@ -81,7 +81,6 @@ struct amd_sched_job {
         struct fence_cb                 cb;
         struct amd_gpu_scheduler        *sched;
         struct amd_sched_entity         *s_entity;
-        void                            *data;
         struct amd_sched_fence          *s_fence;
 };
@@ -140,10 +139,7 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
                                 uint32_t hw_submission);
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
-int amd_sched_push_job(struct amd_gpu_scheduler *sched,
-                       struct amd_sched_entity *c_entity,
-                       void *data,
-                       struct amd_sched_fence **fence);
+int amd_sched_push_job(struct amd_sched_job *sched_job);
 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity,
...
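
A note on context lifetime: with the parser now freed right after submission (amdgpu_cs_parser_fini_late() in the scheduler path of amdgpu_cs_ioctl()), the job pins the context itself. The ioctl takes a reference with amdgpu_ctx_get_ref() when it builds the job, and amdgpu_cs_free_job() drops it with amdgpu_ctx_put() once the scheduler is done, so the context cannot disappear under a queued job. A toy user-space sketch of that get/put discipline follows; the names are hypothetical and, unlike the kref-based kernel code, this single-threaded version does no locking or atomic counting.

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy refcounted context (stand-in for struct amdgpu_ctx with a kref). */
    struct ctx {
            int refcount;
    };

    static struct ctx *ctx_create(void)
    {
            struct ctx *c = calloc(1, sizeof(*c));
            if (c)
                    c->refcount = 1;   /* creator holds the first reference */
            return c;
    }

    /* Mirrors amdgpu_ctx_get_ref(): take an extra reference, tolerate NULL. */
    static struct ctx *ctx_get_ref(struct ctx *c)
    {
            if (c)
                    c->refcount++;
            return c;
    }

    /* Mirrors amdgpu_ctx_put(): drop a reference, free on the last one. */
    static void ctx_put(struct ctx *c)
    {
            if (!c)
                    return;
            if (--c->refcount == 0)
                    free(c);
    }

    int main(void)
    {
            struct ctx *c = ctx_create();
            struct ctx *job_ctx = ctx_get_ref(c);  /* job pins the context */

            ctx_put(c);        /* submitter drops its reference */
            ctx_put(job_ctx);  /* "free_job" drops the job's reference; freed here */
            return 0;
    }
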