Commit 69882565 authored by Christian König, committed by Alex Deucher

drm/amdgpu: add optional ring to *_hdp callbacks

This adds an optional ring to the invalidate_hdp and flush_hdp
callbacks. If the ring isn't specified or the emit_wreg function isn't
available, the HDP operation will be done with the CPU; otherwise it is
done by writing on the ring.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3c9d1fde
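
To make the fallback rule above concrete, here is a small self-contained C sketch of the same dispatch pattern (illustration only; the demo_* names and the register value are made up, not the real amdgpu types, and the real callbacks also take an amdgpu_device):

#include <stdio.h>

/* Simplified stand-in types (illustration only, not the real amdgpu structs). */
struct demo_ring;

struct demo_ring_funcs {
	/* emit a register write on the ring; NULL for rings without support */
	void (*emit_wreg)(struct demo_ring *ring, unsigned int reg, unsigned int val);
};

struct demo_ring {
	const struct demo_ring_funcs *funcs;
	const char *name;
};

#define DEMO_HDP_FLUSH_REG 0x1520u	/* hypothetical register offset */

/* Stand-in for a direct (CPU/MMIO) register write. */
static void demo_cpu_wreg(unsigned int reg, unsigned int val)
{
	printf("CPU write:  reg 0x%04x <- %u\n", reg, val);
}

static void demo_ring_wreg(struct demo_ring *ring, unsigned int reg, unsigned int val)
{
	printf("ring %s:    reg 0x%04x <- %u\n", ring->name, reg, val);
}

/*
 * Same dispatch as the patched *_hdp callbacks: use the CPU path when no
 * ring is given or the ring cannot emit register writes, otherwise queue
 * the write on the ring.
 */
static void demo_flush_hdp(struct demo_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		demo_cpu_wreg(DEMO_HDP_FLUSH_REG, 1);
	else
		ring->funcs->emit_wreg(ring, DEMO_HDP_FLUSH_REG, 1);
}

int main(void)
{
	static const struct demo_ring_funcs gfx_funcs = { .emit_wreg = demo_ring_wreg };
	struct demo_ring gfx = { .funcs = &gfx_funcs, .name = "gfx" };

	demo_flush_hdp(NULL);	/* no ring given: falls back to the CPU write */
	demo_flush_hdp(&gfx);	/* ring with emit_wreg: written via the ring */
	return 0;
}

Existing callers that run on the CPU, such as the amdgpu_gart.c and amdgpu_vm.c paths below, simply pass NULL and keep the old MMIO behaviour; only rings that implement emit_wreg take the new path.
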
@@ -1221,9 +1221,10 @@ struct amdgpu_asic_funcs {
 	/* get config memsize register */
 	u32 (*get_config_memsize)(struct amdgpu_device *adev);
 	/* flush hdp write queue */
-	void (*flush_hdp)(struct amdgpu_device *adev);
+	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 	/* invalidate hdp read cache */
-	void (*invalidate_hdp)(struct amdgpu_device *adev);
+	void (*invalidate_hdp)(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring);
 };
 
 /*
@@ -1367,7 +1368,7 @@ struct amdgpu_nbio_funcs {
 	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
 	u32 (*get_rev_id)(struct amdgpu_device *adev);
 	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-	void (*hdp_flush)(struct amdgpu_device *adev);
+	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 	u32 (*get_memsize)(struct amdgpu_device *adev);
 	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
 				    bool use_doorbell, int doorbell_index);
@@ -1774,8 +1775,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev) (adev)->asic_funcs->flush_hdp((adev))
-#define amdgpu_asic_invalidate_hdp(adev) (adev)->asic_funcs->invalidate_hdp((adev))
+#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
+#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, pasid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (pasid), (addr))
 #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
...
@@ -247,7 +247,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 		}
 	}
 	mb();
-	amdgpu_asic_flush_hdp(adev);
+	amdgpu_asic_flush_hdp(adev, NULL);
 	amdgpu_gmc_flush_gpu_tlb(adev, 0);
 	return 0;
 }
@@ -330,7 +330,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		return r;
 
 	mb();
-	amdgpu_asic_flush_hdp(adev);
+	amdgpu_asic_flush_hdp(adev, NULL);
 	amdgpu_gmc_flush_gpu_tlb(adev, 0);
 	return 0;
 }
...
@@ -854,7 +854,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 	if (vm->use_cpu_for_update) {
 		/* Flush HDP */
 		mb();
-		amdgpu_asic_flush_hdp(adev);
+		amdgpu_asic_flush_hdp(adev, NULL);
 	} else if (params.ib->length_dw == 0) {
 		amdgpu_job_free(job);
 	} else {
@@ -1436,7 +1436,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	if (vm->use_cpu_for_update) {
 		/* Flush HDP */
 		mb();
-		amdgpu_asic_flush_hdp(adev);
+		amdgpu_asic_flush_hdp(adev, NULL);
 	}
 
 	spin_lock(&vm->status_lock);
...
@@ -1715,16 +1715,25 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
 		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 }
 
-static void cik_flush_hdp(struct amdgpu_device *adev)
+static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
-	RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+	}
 }
 
-static void cik_invalidate_hdp(struct amdgpu_device *adev)
+static void cik_invalidate_hdp(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring)
 {
-	WREG32(mmHDP_DEBUG0, 1);
-	RREG32(mmHDP_DEBUG0);
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_DEBUG0, 1);
+		RREG32(mmHDP_DEBUG0);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+	}
 }
 
 static const struct amdgpu_asic_funcs cik_asic_funcs =
...
@@ -1009,7 +1009,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
 	/* After HDP is initialized, flush HDP.*/
-	adev->nbio_funcs->hdp_flush(adev);
+	adev->nbio_funcs->hdp_flush(adev, NULL);
 
 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 		value = false;
...
@@ -53,9 +53,16 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
+				struct amdgpu_ring *ring)
 {
-	WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	if (!ring || !ring->funcs->emit_wreg)
+		WREG32_SOC15_NO_KIQ(NBIO, 0,
+				    mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
+				    0);
+	else
+		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+			NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
 }
 
 static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
...
@@ -53,9 +53,14 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
 
-static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
+				struct amdgpu_ring *ring)
 {
-	WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	if (!ring || !ring->funcs->emit_wreg)
+		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	else
+		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+			NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
 }
 
 static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
...
@@ -1230,16 +1230,25 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
 		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 }
 
-static void si_flush_hdp(struct amdgpu_device *adev)
+static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
-	RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+	}
 }
 
-static void si_invalidate_hdp(struct amdgpu_device *adev)
+static void si_invalidate_hdp(struct amdgpu_device *adev,
+			      struct amdgpu_ring *ring)
 {
-	WREG32(mmHDP_DEBUG0, 1);
-	RREG32(mmHDP_DEBUG0);
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_DEBUG0, 1);
+		RREG32(mmHDP_DEBUG0);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+	}
 }
 
 static const struct amdgpu_asic_funcs si_asic_funcs =
...
@@ -583,14 +583,19 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
 	return adev->nbio_funcs->get_rev_id(adev);
 }
 
-static void soc15_flush_hdp(struct amdgpu_device *adev)
+static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-	adev->nbio_funcs->hdp_flush(adev);
+	adev->nbio_funcs->hdp_flush(adev, ring);
 }
 
-static void soc15_invalidate_hdp(struct amdgpu_device *adev)
+static void soc15_invalidate_hdp(struct amdgpu_device *adev,
+				 struct amdgpu_ring *ring)
 {
-	WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+	if (!ring || !ring->funcs->emit_wreg)
+		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+	else
+		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 }
 
 static const struct amdgpu_asic_funcs soc15_asic_funcs =
...
@@ -856,16 +856,25 @@ static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
 		>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
 }
 
-static void vi_flush_hdp(struct amdgpu_device *adev)
+static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
-	RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+	}
 }
 
-static void vi_invalidate_hdp(struct amdgpu_device *adev)
+static void vi_invalidate_hdp(struct amdgpu_device *adev,
+			      struct amdgpu_ring *ring)
 {
-	WREG32(mmHDP_DEBUG0, 1);
-	RREG32(mmHDP_DEBUG0);
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_DEBUG0, 1);
+		RREG32(mmHDP_DEBUG0);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+	}
 }
 
 static const struct amdgpu_asic_funcs vi_asic_funcs =
...