Commit 0d8e4eb3 authored by Lang Yu, committed by Alex Deucher

drm/amdgpu: add workarounds for VCN TMZ issue on CHIP_RAVEN

There is a hardware issue where VCN can't handle a GTT-backed
TMZ buffer on CHIP_RAVEN series ASICs.

Move such a TMZ buffer to VRAM domain before command
submission as a workaround.

v2:
 - Use patch_cs_in_place callback.

v3:
 - Bail out early for non-secure IBs.
Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Lang Yu <Lang.Yu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
parent b818a5d3
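
For context, and not part of the diff below: the .patch_cs_in_place hook that this change installs is invoked by the common amdgpu command-submission code once per IB, before the job is handed to the scheduler, which is what lets vcn_v1_0_validate_bo() migrate the TMZ message buffer early enough. A minimal sketch of that caller side follows; the helper name, the way the ring is passed in, and the exact loop shape are illustrative assumptions, not the actual amdgpu_cs implementation.

/*
 * Illustrative sketch only (hypothetical helper, not from this commit):
 * walk every IB of a job and give the ring a chance to patch it in place.
 */
static int example_patch_job_ibs(struct amdgpu_cs_parser *p,
				 struct amdgpu_job *job,
				 struct amdgpu_ring *ring)
{
	unsigned int i;
	int r;

	/* Most rings leave the hook NULL; only VCN 1.0 dec needs it here. */
	if (!ring->funcs->patch_cs_in_place)
		return 0;

	for (i = 0; i < job->num_ibs; i++) {
		/* For VCN 1.0 this lands in vcn_v1_0_ring_patch_cs_in_place(). */
		r = ring->funcs->patch_cs_in_place(p, job, &job->ibs[i]);
		if (r)
			return r;
	}

	return 0;
}
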
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_cs.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
@@ -1900,6 +1901,75 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

/*
 * It is a hardware issue that VCN can't handle a GTT TMZ buffer on
 * CHIP_RAVEN series ASIC. Move such a GTT TMZ buffer to VRAM domain
 * before command submission as a workaround.
 */
static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
				struct amdgpu_job *job,
				uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	bo = mapping->bo_va->base.bo;
	if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
		return 0;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
		return r;
	}

	return r;
}

static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	int i, r;

	if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
			r = vcn_v1_0_validate_bo(p, job,
						 ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}

	return 0;
}

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
@@ -1910,6 +1980,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +