Commit 405a81ae authored by xinhui pan, committed by Alex Deucher

drm/amdgpu: VCN avoid memory allocation during IB test

Allocate the extra msg from the direct IB pool so the IB test no longer has to allocate a dedicated buffer object.
Signed-off-by: xinhui pan <xinhui.pan@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent cb9038aa
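For readers unfamiliar with the IB pool interface, the pattern the hunks below switch to looks roughly like the following minimal sketch (assembled from this diff only, error paths trimmed; adev, r, msg, addr and fence stand for the locals of the patched functions):

	struct amdgpu_ib ib;

	/* Reserve message space from the pre-allocated direct IB pool
	 * instead of creating, pinning and mapping a dedicated VRAM BO. */
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		return r;

	/* The message is written through the IB's CPU mapping ... */
	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib.ptr);
	/* ... and the hardware reads it at the matching aligned GPU address. */
	addr = AMDGPU_GPU_PAGE_ALIGN(ib.gpu_addr);

	/* After the test job is submitted, the IB is released against the
	 * submission fence rather than freed synchronously. */
	amdgpu_ib_free(adev, &ib, fence);

The per-test buffer now comes out of the direct IB pool (two GPU pages for the decoder messages, (128 << 10) + AMDGPU_GPU_PAGE_SIZE for the encoder test), so the test path no longer depends on a BO allocation and reservation succeeding.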
@@ -541,15 +541,14 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
 }
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
-				   struct amdgpu_bo *bo,
+				   struct amdgpu_ib *ib_msg,
 				   struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	uint64_t addr;
-	void *msg = NULL;
+	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 	int i, r;
 	r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -558,8 +557,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 		goto err;
 	ib = &job->ibs[0];
-	addr = amdgpu_bo_gpu_offset(bo);
-	msg = amdgpu_bo_kptr(bo);
 	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
 	ib->ptr[1] = addr;
 	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -576,9 +573,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	if (r)
 		goto err_free;
-	amdgpu_bo_fence(bo, f, false);
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+	amdgpu_ib_free(adev, ib_msg, f);
 	if (fence)
 		*fence = dma_fence_get(f);
@@ -588,27 +583,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 err_free:
 	amdgpu_job_free(job);
 err:
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+	amdgpu_ib_free(adev, ib_msg, f);
 	return r;
 }
 static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-					 struct amdgpu_bo **bo)
+					 struct amdgpu_ib *ib)
 {
 	struct amdgpu_device *adev = ring->adev;
 	uint32_t *msg;
 	int r, i;
-	*bo = NULL;
-	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      bo, NULL, (void **)&msg);
+	memset(ib, 0, sizeof(*ib));
+	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+			AMDGPU_IB_POOL_DIRECT,
+			ib);
 	if (r)
 		return r;
+	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 	msg[0] = cpu_to_le32(0x00000028);
 	msg[1] = cpu_to_le32(0x00000038);
 	msg[2] = cpu_to_le32(0x00000001);
@@ -630,19 +624,20 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 }
 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-					  struct amdgpu_bo **bo)
+					  struct amdgpu_ib *ib)
 {
 	struct amdgpu_device *adev = ring->adev;
 	uint32_t *msg;
 	int r, i;
-	*bo = NULL;
-	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      bo, NULL, (void **)&msg);
+	memset(ib, 0, sizeof(*ib));
+	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+			AMDGPU_IB_POOL_DIRECT,
+			ib);
 	if (r)
 		return r;
+	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
 	msg[0] = cpu_to_le32(0x00000028);
 	msg[1] = cpu_to_le32(0x00000018);
 	msg[2] = cpu_to_le32(0x00000000);
@@ -658,21 +653,21 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
-	struct amdgpu_bo *bo;
+	struct amdgpu_ib ib;
 	long r;
-	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
+	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 	if (r)
 		goto error;
-	r = amdgpu_vcn_dec_send_msg(ring, bo, NULL);
+	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
 	if (r)
 		goto error;
-	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
+	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 	if (r)
 		goto error;
-	r = amdgpu_vcn_dec_send_msg(ring, bo, &fence);
+	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
 	if (r)
 		goto error;
@@ -688,8 +683,8 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 }
 static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
-				      struct amdgpu_bo *bo,
-				      struct dma_fence **fence)
+				      struct amdgpu_ib *ib_msg,
+				      struct dma_fence **fence)
 {
 	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
 	const unsigned int ib_size_dw = 64;
@@ -697,7 +692,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	uint64_t addr;
+	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 	int i, r;
 	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
@@ -706,7 +701,6 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 		goto err;
 	ib = &job->ibs[0];
-	addr = amdgpu_bo_gpu_offset(bo);
 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
@@ -726,9 +720,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	if (r)
 		goto err_free;
-	amdgpu_bo_fence(bo, f, false);
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
+	amdgpu_ib_free(adev, ib_msg, f);
 	if (fence)
 		*fence = dma_fence_get(f);
@@ -738,31 +730,29 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 err_free:
 	amdgpu_job_free(job);
 err:
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
+	amdgpu_ib_free(adev, ib_msg, f);
 	return r;
 }
 int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
-	struct amdgpu_bo *bo;
+	struct amdgpu_ib ib;
 	long r;
-	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
+	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
 	if (r)
 		goto error;
-	r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL);
+	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
 	if (r)
 		goto error;
-	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
+	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
 	if (r)
 		goto error;
-	r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence);
+	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
 	if (r)
 		goto error;
@@ -809,7 +799,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 }
 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-					 struct amdgpu_bo *bo,
+					 struct amdgpu_ib *ib_msg,
 					 struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
@@ -825,7 +815,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 		return r;
 	ib = &job->ibs[0];
-	addr = amdgpu_bo_gpu_offset(bo);
+	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -863,7 +853,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 }
 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-					  struct amdgpu_bo *bo,
+					  struct amdgpu_ib *ib_msg,
 					  struct dma_fence **fence)
 {
 	const unsigned ib_size_dw = 16;
@@ -879,7 +869,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 		return r;
 	ib = &job->ibs[0];
-	addr = amdgpu_bo_gpu_offset(bo);
+	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 	ib->length_dw = 0;
 	ib->ptr[ib->length_dw++] = 0x00000018;
@@ -918,21 +908,23 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
+	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *fence = NULL;
-	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_ib ib;
 	long r;
-	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &bo, NULL, NULL);
+	memset(&ib, 0, sizeof(ib));
+	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
+			AMDGPU_IB_POOL_DIRECT,
+			&ib);
 	if (r)
 		return r;
-	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
+	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
 	if (r)
 		goto error;
-	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
+	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
 	if (r)
 		goto error;
@@ -943,9 +935,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	r = 0;
 error:
+	amdgpu_ib_free(adev, &ib, fence);
 	dma_fence_put(fence);
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_free_kernel(&bo, NULL, NULL);
 	return r;
 }