Commit b672cb1e authored by Philip Yang's avatar Philip Yang Committed by Alex Deucher

drm/amdgpu: enable retry fault wptr overflow

If XNACK is on, VM retry fault interrupts are sent to IH ring1, and ring1
fills up quickly. IH then cannot receive other interrupts; this causes a
deadlock if a buffer is being migrated using SDMA and the driver waits for
SDMA completion while handling a retry fault.

Remove VMC from IH storm client, enable ring1 write pointer overflow,
then IH will drop retry fault interrupts and be able to receive other
interrupts while driver is handling retry fault.

The IH ring1 write pointer is not written back to memory by IH, and the
ring1 write pointer recorded by the self-irq is not updated, so always
read the latest ring1 write pointer from the register.
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent df23d1bb
...@@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev, ...@@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev,
tmp = vega10_ih_rb_cntl(ih, tmp); tmp = vega10_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih) if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
if (ih == &adev->irq.ih1) { if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n"); dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
...@@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) ...@@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
u32 ih_chicken; u32 ih_chicken;
int ret; int ret;
int i; int i;
u32 tmp;
/* disable irqs */ /* disable irqs */
ret = vega10_ih_toggle_interrupts(adev, false); ret = vega10_ih_toggle_interrupts(adev, false);
...@@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) ...@@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
} }
} }
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
pci_set_master(adev->pdev); pci_set_master(adev->pdev);
/* enable interrupts */ /* enable interrupts */
...@@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, ...@@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp; u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs; struct amdgpu_ih_regs *ih_regs;
if (ih == &adev->irq.ih) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
*/
wptr = le32_to_cpu(*ih->wptr_cpu); wptr = le32_to_cpu(*ih->wptr_cpu);
ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out; goto out;
}
ih_regs = &ih->ih_regs;
/* Double check that the overflow wasn't already cleared. */ /* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
...@@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev, ...@@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
uint32_t wptr = cpu_to_le32(entry->src_data[0]);
switch (entry->ring_id) { switch (entry->ring_id) {
case 1: case 1:
*adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work); schedule_work(&adev->irq.ih1_work);
break; break;
case 2: case 2:
*adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work); schedule_work(&adev->irq.ih2_work);
break; break;
default: break; default: break;
......
...@@ -223,10 +223,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev, ...@@ -223,10 +223,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
tmp = vega20_ih_rb_cntl(ih, tmp); tmp = vega20_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih) if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
if (ih == &adev->irq.ih1) { if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n"); dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
...@@ -300,7 +298,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) ...@@ -300,7 +298,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
u32 ih_chicken; u32 ih_chicken;
int ret; int ret;
int i; int i;
u32 tmp;
/* disable irqs */ /* disable irqs */
ret = vega20_ih_toggle_interrupts(adev, false); ret = vega20_ih_toggle_interrupts(adev, false);
...@@ -341,15 +338,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) ...@@ -341,15 +338,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
} }
} }
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
pci_set_master(adev->pdev); pci_set_master(adev->pdev);
/* enable interrupts */ /* enable interrupts */
...@@ -395,11 +383,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev, ...@@ -395,11 +383,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp; u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs; struct amdgpu_ih_regs *ih_regs;
if (ih == &adev->irq.ih) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
*/
wptr = le32_to_cpu(*ih->wptr_cpu); wptr = le32_to_cpu(*ih->wptr_cpu);
ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out; goto out;
}
ih_regs = &ih->ih_regs;
/* Double check that the overflow wasn't already cleared. */ /* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
...@@ -491,15 +485,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev, ...@@ -491,15 +485,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
uint32_t wptr = cpu_to_le32(entry->src_data[0]);
switch (entry->ring_id) { switch (entry->ring_id) {
case 1: case 1:
*adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work); schedule_work(&adev->irq.ih1_work);
break; break;
case 2: case 2:
*adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work); schedule_work(&adev->irq.ih2_work);
break; break;
default: break; default: break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment