Commit 4ef72566 authored by Christian König

drm/radeon: fix const IB handling v2

Const IBs are executed on the CE, not the CP, so we can't
fence them in the normal way.

So submit them directly before the IB instead, just as
the documentation says.

v2: keep the extra documentation
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent bfb38d35
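
In short, a const IB is no longer submitted and fenced on its own; the caller now hands it to radeon_ib_schedule() together with the main IB, which writes it to the ring right before the main IB and lets it share the main IB's fence (see the last two hunks below). A minimal caller-side sketch of the new convention, condensed from the radeon_cs_ib_vm_chunk() hunk below, error handling omitted:

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		/* SI: let the CE const IB ride along with the main IB */
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
	} else {
		/* no const IB: pass NULL */
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}
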
@@ -3693,7 +3693,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[6] = PACKET2(0);
 	ib.ptr[7] = PACKET2(0);
 	ib.length_dw = 8;
-	r = radeon_ib_schedule(rdev, &ib);
+	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
@@ -2619,7 +2619,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
-	r = radeon_ib_schedule(rdev, &ib);
+	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_scratch_free(rdev, scratch);
 		radeon_ib_free(rdev, &ib);
@@ -751,7 +751,8 @@ struct si_rlc {
 int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib *ib, unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -354,7 +354,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 	}
 	radeon_cs_sync_rings(parser);
 	parser->ib.vm_id = 0;
-	r = radeon_ib_schedule(rdev, &parser->ib);
+	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
 	}
@@ -452,25 +452,24 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	}
 	radeon_cs_sync_rings(parser);
+	parser->ib.vm_id = vm->id;
+	/* ib pool is bind at 0 in virtual address space,
+	 * so gpu_addr is the offset inside the pool bo
+	 */
+	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
 		parser->const_ib.vm_id = vm->id;
-		/* ib pool is bind at 0 in virtual address space to gpu_addr is the
-		 * offset inside the pool bo
+		/* ib pool is bind at 0 in virtual address space,
+		 * so gpu_addr is the offset inside the pool bo
 		 */
 		parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
-		r = radeon_ib_schedule(rdev, &parser->const_ib);
-		if (r)
-			goto out;
+		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+	} else {
+		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
 	}
-	parser->ib.vm_id = vm->id;
-	/* ib pool is bind at 0 in virtual address space to gpu_addr is the
-	 * offset inside the pool bo
-	 */
-	parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
-	parser->ib.is_const_ib = false;
-	r = radeon_ib_schedule(rdev, &parser->ib);
 out:
 	if (!r) {
 		if (vm->fence) {
@@ -74,7 +74,8 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_fence_unref(&ib->fence);
 }
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+		       struct radeon_ib *const_ib)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
 	bool need_sync = false;
@@ -105,6 +106,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	if (!need_sync) {
 		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
 	}
+	if (const_ib) {
+		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+	}
 	radeon_ring_ib_execute(rdev, ib->ring, ib);
 	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
 	if (r) {
@@ -112,6 +117,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 		radeon_ring_unlock_undo(rdev, ring);
 		return r;
 	}
+	if (const_ib) {
+		const_ib->fence = radeon_fence_ref(ib->fence);
+	}
 	radeon_ring_unlock_commit(rdev, ring);
 	return 0;
 }