Commit 571c0536 authored by Alex Deucher

drm/amdgpu: switch sdma buffer function tear down to a helper

Switch all of the SDMA implementations to use the helper to
tear down the ttm buffer manager.

Tested-by: Bokun Zhang <Bokun.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e5da6519
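
In short: each ASIC's stop routine used to open-code a comparison of adev->mman.buffer_funcs_ring against its own ring variables before calling amdgpu_ttm_set_buffer_funcs_status(). A minimal before/after sketch of the call-site change, condensed from the two-instance hunks below:

        /* before: per-ASIC open-coded check (two-instance variant) */
        if ((adev->mman.buffer_funcs_ring == sdma0) ||
            (adev->mman.buffer_funcs_ring == sdma1))
                amdgpu_ttm_set_buffer_funcs_status(adev, false);

        /* after: one shared helper that walks every SDMA instance */
        amdgpu_sdma_unset_buffer_funcs_helper(adev);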
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -285,3 +285,24 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
         }
         return err;
 }
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+        struct amdgpu_ring *sdma;
+        int i;
+
+        for (i = 0; i < adev->sdma.num_instances; i++) {
+                if (adev->sdma.has_page_queue) {
+                        sdma = &adev->sdma.instance[i].page;
+                        if (adev->mman.buffer_funcs_ring == sdma) {
+                                amdgpu_ttm_set_buffer_funcs_status(adev, false);
+                                break;
+                        }
+                }
+                sdma = &adev->sdma.instance[i].ring;
+                if (adev->mman.buffer_funcs_ring == sdma) {
+                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
+                        break;
+                }
+        }
+}
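
Design note: the helper loops over adev->sdma.num_instances rather than assuming a fixed instance count, checks the page queue first on parts that have one (adev->sdma.has_page_queue), and breaks after the first match, since at most one ring is registered as TTM's buffer funcs ring.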
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -128,4 +128,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
                               char *fw_name, u32 instance, bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
                                   bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
 #endif
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
         u32 rb_cntl;
         int i;
 
-        if ((adev->mman.buffer_funcs_ring == sdma0) ||
-            (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_buffer_funcs_status(adev, false);
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
         u32 rb_cntl, ib_cntl;
         int i;
 
-        if ((adev->mman.buffer_funcs_ring == sdma0) ||
-            (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_buffer_funcs_status(adev, false);
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
         u32 rb_cntl, ib_cntl;
         int i;
 
-        if ((adev->mman.buffer_funcs_ring == sdma0) ||
-            (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_buffer_funcs_status(adev, false);
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -915,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
         u32 rb_cntl, ib_cntl;
-        int i, unset = 0;
+        int i;
 
-        for (i = 0; i < adev->sdma.num_instances; i++) {
-                sdma[i] = &adev->sdma.instance[i].ring;
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
-                if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                        unset = 1;
-                }
-
+        for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                 WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -957,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
         u32 rb_cntl, ib_cntl;
         int i;
-        bool unset = false;
 
-        for (i = 0; i < adev->sdma.num_instances; i++) {
-                sdma[i] = &adev->sdma.instance[i].page;
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
-                if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-                        (!unset)) {
-                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                        unset = true;
-                }
-
+        for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
                 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
                                         RB_ENABLE, 0);
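
The two sdma_v4_0 hunks also retire the unset flag that each loop carried to avoid disabling the buffer funcs more than once; the break in the shared helper covers the same case.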
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -584,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
         u32 rb_cntl, ib_cntl;
         int i;
 
-        if ((adev->mman.buffer_funcs_ring == sdma0) ||
-            (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_buffer_funcs_status(adev, false);
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -414,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
-        struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
-        struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
         u32 rb_cntl, ib_cntl;
         int i;
 
-        if ((adev->mman.buffer_funcs_ring == sdma0) ||
-            (adev->mman.buffer_funcs_ring == sdma1) ||
-            (adev->mman.buffer_funcs_ring == sdma2) ||
-            (adev->mman.buffer_funcs_ring == sdma3))
-                amdgpu_ttm_set_buffer_funcs_status(adev, false);
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -398,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-        struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
         u32 rb_cntl, ib_cntl;
         int i;
 
-        if ((adev->mman.buffer_funcs_ring == sdma0) ||
-            (adev->mman.buffer_funcs_ring == sdma1))
-                amdgpu_ttm_set_buffer_funcs_status(adev, false);
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -415,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
                 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
                 WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
         }
-
-        sdma0->sched.ready = false;
-        sdma1->sched.ready = false;
 }
 
 /**
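
The second sdma_v6_0 hunk additionally drops the sched.ready assignments, which hard-coded SDMA instances 0 and 1.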
drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
         struct amdgpu_ring *ring;
         u32 rb_cntl;
         unsigned i;
 
+        amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
         for (i = 0; i < adev->sdma.num_instances; i++) {
                 ring = &adev->sdma.instance[i].ring;
                 /* dma0 */
                 rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
                 rb_cntl &= ~DMA_RB_ENABLE;
                 WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
-                if (adev->mman.buffer_funcs_ring == ring)
-                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
         }
 }