Commit f7d66fb2 authored by Christian König

drm/amdgpu: cleanup scheduler job initialization v2

Init the DRM scheduler base class while allocating the job.

This makes the whole handling much cleaner.

v2: fix coding style
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-7-christian.koenig@amd.com
parent 940ca22b
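For orientation, a minimal before/after sketch of a typical caller, distilled from the hunks below (the &adev->mman.entity / AMDGPU_FENCE_OWNER_UNDEFINED arguments are just the values used by the TTM call sites shown further down; other callers pass their own entity and owner, or NULL for direct submission):

	/* Before: the scheduler job was initialized at submit time. */
	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
	/* ... fill the IB ... */
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;	/* submit could still fail */
	dma_fence_put(fence);

	/* After: entity and owner are bound while allocating the job, so
	 * submission can no longer fail and simply returns the fence. */
	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
	/* ... fill the IB ... */
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);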
@@ -674,7 +674,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
 		goto err;
 	}
-	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
 	if (ret)
 		goto err;
......
@@ -291,12 +291,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 		return -EINVAL;

 	for (i = 0; i < p->gang_size; ++i) {
-		ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
-		if (ret)
-			goto free_all_kdata;
-
-		ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
-					 &fpriv->vm);
+		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
+				       num_ibs[i], &p->jobs[i]);
 		if (ret)
 			goto free_all_kdata;
 	}
......
@@ -89,8 +89,9 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 	return DRM_GPU_SCHED_STAT_NOMINAL;
 }

-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job, struct amdgpu_vm *vm)
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+		     struct drm_sched_entity *entity, void *owner,
+		     unsigned int num_ibs, struct amdgpu_job **job)
 {
 	if (num_ibs == 0)
 		return -EINVAL;
@@ -111,23 +112,30 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

-	return 0;
+	if (!entity)
+		return 0;
+
+	return drm_sched_job_init(&(*job)->base, entity, owner);
 }

-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-			     enum amdgpu_ib_pool_type pool_type,
-			     struct amdgpu_job **job)
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+			     struct drm_sched_entity *entity, void *owner,
+			     size_t size, enum amdgpu_ib_pool_type pool_type,
+			     struct amdgpu_job **job)
 {
 	int r;

-	r = amdgpu_job_alloc(adev, 1, job, NULL);
+	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
 	if (r)
 		return r;

 	(*job)->num_ibs = 1;
 	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
-	if (r)
+	if (r) {
+		if (entity)
+			drm_sched_job_cleanup(&(*job)->base);
 		kfree(*job);
+	}

 	return r;
 }
@@ -191,6 +199,9 @@ void amdgpu_job_set_gang_leader(struct amdgpu_job *job,

 void amdgpu_job_free(struct amdgpu_job *job)
 {
+	if (job->base.entity)
+		drm_sched_job_cleanup(&job->base);
+
 	amdgpu_job_free_resources(job);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
@@ -203,25 +214,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	dma_fence_put(&job->hw_fence);
 }

-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
-		      void *owner, struct dma_fence **f)
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
 {
-	int r;
-
-	if (!f)
-		return -EINVAL;
-
-	r = drm_sched_job_init(&job->base, entity, owner);
-	if (r)
-		return r;
+	struct dma_fence *f;

 	drm_sched_job_arm(&job->base);
-
-	*f = dma_fence_get(&job->base.s_fence->finished);
+	f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	drm_sched_entity_push_job(&job->base);
-
-	return 0;
+	return f;
 }

 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
......
@@ -78,18 +78,20 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
 	return to_amdgpu_ring(job->base.entity->rq->sched);
 }

-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-			     enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+		     struct drm_sched_entity *entity, void *owner,
+		     unsigned int num_ibs, struct amdgpu_job **job);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+			     struct drm_sched_entity *entity, void *owner,
+			     size_t size, enum amdgpu_ib_pool_type pool_type,
+			     struct amdgpu_job **job);
 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
 			      struct amdgpu_bo *gws, struct amdgpu_bo *oa);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
 				struct amdgpu_job *leader);
 void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
-		      void *owner, struct dma_fence **f);
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 			     struct dma_fence **fence);
......
@@ -150,14 +150,15 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
 	const unsigned ib_size_dw = 16;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
 				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;

 	ib = &job->ibs[0];

-	ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+	ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0,
+			     PACKETJ_TYPE0);
 	ib->ptr[1] = 0xDEADBEEF;
 	for (i = 2; i < 16; i += 2) {
 		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
......
@@ -189,7 +189,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	struct amdgpu_device *adev = ring->adev;
 	unsigned offset, num_pages, num_dw, num_bytes;
 	uint64_t src_addr, dst_addr;
-	struct dma_fence *fence;
 	struct amdgpu_job *job;
 	void *cpu_addr;
 	uint64_t flags;
@@ -229,7 +228,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
 		return r;
@@ -269,18 +270,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 		}
 	}

-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r)
-		goto error_free;
-
-	dma_fence_put(fence);
-
-	return r;
-
-error_free:
-	amdgpu_job_free(job);
-	return r;
+	dma_fence_put(amdgpu_job_submit(job));
+	return 0;
 }

 /**
@@ -1414,7 +1405,8 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
 }

 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
-					 unsigned long offset, void *buf, int len, int write)
+					 unsigned long offset, void *buf,
+					 int len, int write)
 {
 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -1438,26 +1430,27 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
 		memcpy(adev->mman.sdma_access_ptr, buf, len);

 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+				     &job);
 	if (r)
 		goto out;

 	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
-	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+		src_mm.start;
 	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
 	if (write)
 		swap(src_addr, dst_addr);

-	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+				PAGE_SIZE, false);

 	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);

-	r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r) {
-		amdgpu_job_free(job);
-		goto out;
-	}
+	fence = amdgpu_job_submit(job);

 	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
 		r = -ETIMEDOUT;
@@ -1956,7 +1949,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
 		AMDGPU_IB_POOL_DELAYED;
 	int r;

-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     num_dw * 4, pool, job);
 	if (r)
 		return r;
@@ -2015,8 +2010,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 	if (direct_submit)
 		r = amdgpu_job_submit_direct(job, ring, fence);
 	else
-		r = amdgpu_job_submit(job, &adev->mman.entity,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+		*fence = amdgpu_job_submit(job);
 	if (r)
 		goto error_free;
@@ -2061,16 +2055,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);

-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-	if (r)
-		goto error_free;
-
+	*fence = amdgpu_job_submit(job);
 	return 0;
-
-error_free:
-	amdgpu_job_free(job);
-	return r;
 }

 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
......
@@ -1132,7 +1132,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	unsigned offset_idx = 0;
 	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

-	r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     64, direct ? AMDGPU_IB_POOL_DIRECT :
 				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
 		return r;
@@ -1181,10 +1183,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r)
 			goto err_free;

-		r = amdgpu_job_submit(job, &adev->uvd.entity,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err_free;
+		f = amdgpu_job_submit(job);
 	}

 	amdgpu_bo_reserve(bo, true);
......
@@ -450,8 +450,10 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	uint64_t addr;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+				     &job);
 	if (r)
 		return r;
@@ -538,7 +540,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	struct dma_fence *f = NULL;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     ib_size_dw * 4,
 				     direct ? AMDGPU_IB_POOL_DIRECT :
 				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
@@ -570,8 +574,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	if (direct)
 		r = amdgpu_job_submit_direct(job, ring, &f);
 	else
-		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+		f = amdgpu_job_submit(job);
 	if (r)
 		goto err;
......
@@ -600,15 +600,16 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 				   struct amdgpu_ib *ib_msg,
 				   struct dma_fence **fence)
 {
+	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(adev, 64,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+				     64, AMDGPU_IB_POOL_DIRECT,
+				     &job);
 	if (r)
 		goto err;
@@ -787,8 +788,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	if (sq)
 		ib_size_dw += 8;

-	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+				     &job);
 	if (r)
 		goto err;
@@ -916,8 +918,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	if (sq)
 		ib_size_dw += 8;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+				     &job);
 	if (r)
 		return r;
@@ -982,8 +985,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	if (sq)
 		ib_size_dw += 8;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+				     &job);
 	if (r)
 		return r;
......
@@ -47,6 +47,32 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
 	return r;
 }

+/* Allocate a new job for @count PTE updates */
+static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
+				    unsigned int count)
+{
+	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+		: AMDGPU_IB_POOL_DELAYED;
+	struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
+		: &p->vm->delayed;
+	unsigned int ndw;
+	int r;
+
+	/* estimate how many dw we need */
+	ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+	if (p->pages_addr)
+		ndw += count * 2;
+	ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+	r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
+				     ndw * 4, pool, &p->job);
+	if (r)
+		return r;
+
+	p->num_dw_left = ndw;
+	return 0;
+}
+
 /**
  * amdgpu_vm_sdma_prepare - prepare SDMA command submission
  *
@@ -61,17 +87,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 				  struct dma_resv *resv,
 				  enum amdgpu_sync_mode sync_mode)
 {
-	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-		: AMDGPU_IB_POOL_DELAYED;
-	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
 	int r;

-	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
+	r = amdgpu_vm_sdma_alloc_job(p, 0);
 	if (r)
 		return r;

-	p->num_dw_left = ndw;
-
 	if (!resv)
 		return 0;
@@ -91,20 +112,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 				 struct dma_fence **fence)
 {
 	struct amdgpu_ib *ib = p->job->ibs;
-	struct drm_sched_entity *entity;
 	struct amdgpu_ring *ring;
 	struct dma_fence *f;
-	int r;

-	entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
-	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
+	ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+			    sched);

 	WARN_ON(ib->length_dw == 0);
 	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > p->num_dw_left);
-	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
-	if (r)
-		goto error;
+	f = amdgpu_job_submit(p->job);

 	if (p->unlocked) {
 		struct dma_fence *tmp = dma_fence_get(f);
@@ -120,10 +137,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	swap(*fence, f);
 	dma_fence_put(f);
 	return 0;
-
-error:
-	amdgpu_job_free(p->job);
-	return r;
 }

 /**
@@ -203,8 +216,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 				 uint64_t flags)
 {
 	struct amdgpu_bo *bo = &vmbo->bo;
-	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-		: AMDGPU_IB_POOL_DELAYED;
 	struct dma_resv_iter cursor;
 	unsigned int i, ndw, nptes;
 	struct dma_fence *fence;
@@ -231,19 +242,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 			if (r)
 				return r;

-			/* estimate how many dw we need */
-			ndw = 32;
-			if (p->pages_addr)
-				ndw += count * 2;
-			ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
-			ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
-
-			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
-						     &p->job);
+			r = amdgpu_vm_sdma_alloc_job(p, count);
 			if (r)
 				return r;
-
-			p->num_dw_left = ndw;
 		}

 		if (!p->pages_addr) {
......
@@ -371,7 +371,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 * translation. Avoid this by doing the invalidation from the SDMA
 	 * itself.
 	 */
-	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
 				     &job);
 	if (r)
 		goto error_alloc;
@@ -380,10 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	job->vm_needs_flush = true;
 	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r)
-		goto error_submit;
+	fence = amdgpu_job_submit(job);

 	mutex_unlock(&adev->mman.gtt_window_lock);
@@ -392,9 +391,6 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	return;

-error_submit:
-	amdgpu_job_free(job);
-
 error_alloc:
 	mutex_unlock(&adev->mman.gtt_window_lock);
 	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
......
@@ -216,8 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	uint64_t addr;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
@@ -280,8 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 	uint64_t addr;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
......
@@ -213,7 +213,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  *
  * Open up a stream for HW test
  */
-static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
 				       struct amdgpu_bo *bo,
 				       struct dma_fence **fence)
 {
@@ -224,8 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	uint64_t addr;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
@@ -276,7 +276,7 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
  *
  * Close up a stream for HW test or if userspace failed to do so
  */
-static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
 					struct amdgpu_bo *bo,
 					struct dma_fence **fence)
 {
@@ -287,8 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	uint64_t addr;
 	int i, r;

-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
......
@@ -65,8 +65,11 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = npages * 8;

-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-				     AMDGPU_IB_POOL_DELAYED, &job);
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     num_dw * 4 + num_bytes,
+				     AMDGPU_IB_POOL_DELAYED,
+				     &job);
 	if (r)
 		return r;
@@ -89,18 +92,10 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 	cpu_addr = &job->ibs[0].ptr[num_dw];
 	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);

-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r)
-		goto error_free;
-
+	fence = amdgpu_job_submit(job);
 	dma_fence_put(fence);

 	return r;
-
-error_free:
-	amdgpu_job_free(job);
-	return r;
 }

 /**
......