Commit f091c1c7 authored by Tianci.Yin, committed by Alex Deucher

drm/amdgpu: disable 3D pipe 1 on Navi1x

[why]
CP firmware decided to skip setting the state for 3D pipe 1 on Navi1x, since
there is no use case for it.

[how]
Disable 3D pipe 1 on Navi1x.
Reviewed-by: Feifei Xu <Feifei.Xu@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Tianci.Yin <tianci.yin@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0cf64555
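Taken together, the hunks below follow one pattern: the Navi1x gfx ring count drops from 2 to 1 (with the constant renamed to make its Navi1x scope explicit), and every place that touched gfx_ring[1] unconditionally is wrapped in a runtime check on adev->gfx.num_gfx_rings. A minimal userspace sketch of that guard idiom follows; the names in it are illustrative stand-ins, not the driver's own:

```c
/*
 * Sketch of the guard idiom the patch applies: derive the ring count
 * per ASIC once, then gate all work on the second ring behind a
 * runtime check instead of assuming the ring exists.
 * All names here are hypothetical, not taken from the kernel.
 */
#include <stdio.h>

#define NUM_GFX_RINGS_NV1X 1	/* Navi1x: 3D pipe 1 unused, one ring only */

struct gfx_state {
	int num_gfx_rings;
};

static void init_ring(int idx)
{
	printf("init gfx ring %d\n", idx);
}

int main(void)
{
	struct gfx_state gfx = { .num_gfx_rings = NUM_GFX_RINGS_NV1X };

	init_ring(0);			/* ring 0 always exists */
	if (gfx.num_gfx_rings > 1)	/* ring 1 only if the ASIC exposes it */
		init_ring(1);
	return 0;
}
```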
@@ -52,7 +52,7 @@
  * 1. Primary ring
  * 2. Async ring
  */
-#define GFX10_NUM_GFX_RINGS		2
+#define GFX10_NUM_GFX_RINGS_NV1X	1
 #define GFX10_MEC_HPD_SIZE		2048
 #define F32_CE_PROGRAM_RAM_SIZE		65536
@@ -1308,7 +1308,7 @@ static int gfx_v10_0_sw_init(void *handle)
     case CHIP_NAVI14:
     case CHIP_NAVI12:
         adev->gfx.me.num_me = 1;
-        adev->gfx.me.num_pipe_per_me = 2;
+        adev->gfx.me.num_pipe_per_me = 1;
         adev->gfx.me.num_queue_per_pipe = 1;
         adev->gfx.mec.num_mec = 2;
         adev->gfx.mec.num_pipe_per_mec = 4;
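The sw_init hunk above is what actually hides 3D pipe 1: the driver enumerates gfx queues from the me/pipe/queue counts, so a pipe outside num_pipe_per_me is never visited. A simplified stand-in for that enumeration (the triple loop is illustrative only, not the driver code):

```c
/*
 * Illustrative arithmetic: how the per-ME topology numbers bound queue
 * enumeration. With num_pipe_per_me cut from 2 to 1, pipe 1 simply
 * drops out of the loop. Field names mirror the diff.
 */
#include <stdio.h>

int main(void)
{
	int num_me = 1, num_pipe_per_me = 1, num_queue_per_pipe = 1;

	for (int me = 0; me < num_me; me++)
		for (int pipe = 0; pipe < num_pipe_per_me; pipe++)
			for (int q = 0; q < num_queue_per_pipe; q++)
				printf("gfx me%d pipe%d queue%d\n", me, pipe, q);
	/* before the patch (num_pipe_per_me = 2) this also visited pipe 1 */
	return 0;
}
```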
@@ -2714,18 +2714,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
     amdgpu_ring_commit(ring);
 
     /* submit cs packet to copy state 0 to next available state */
-    ring = &adev->gfx.gfx_ring[1];
-    r = amdgpu_ring_alloc(ring, 2);
-    if (r) {
-        DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
-        return r;
-    }
-
-    amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
-    amdgpu_ring_write(ring, 0);
-    amdgpu_ring_commit(ring);
+    if (adev->gfx.num_gfx_rings > 1) {
+        /* maximum supported gfx ring is 2 */
+        ring = &adev->gfx.gfx_ring[1];
+        r = amdgpu_ring_alloc(ring, 2);
+        if (r) {
+            DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+            return r;
+        }
+
+        amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+        amdgpu_ring_write(ring, 0);
+        amdgpu_ring_commit(ring);
+    }
 
     return 0;
 }
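For the guarded async-ring submission above, amdgpu_ring_alloc(ring, 2) reserves exactly two dwords because the CLEAR_STATE submission is one type-3 PM4 packet: a header dword plus a single payload dword. A sketch of that encoding follows; the header bit layout below follows the usual PM4 type-3 convention in the amdgpu headers, but treat the exact values as an assumption of this sketch rather than a quote:

```c
/*
 * Why the submission needs two dwords: one PACKET3 header plus one
 * payload dword. The macro mirrors the common PM4 type-3 header form
 * (type in bits 31:30, opcode in 15:8, count in 13:0 of the low half).
 */
#include <stdint.h>
#include <stdio.h>

#define PACKET_TYPE3		3u
#define PACKET3(op, n)		((PACKET_TYPE3 << 30) | \
				 (((op) & 0xFF) << 8) | ((n) & 0x3FFF))
#define PACKET3_CLEAR_STATE	0x12	/* assumed opcode value */

int main(void)
{
	uint32_t pkt[2] = { PACKET3(PACKET3_CLEAR_STATE, 0), 0 };

	printf("dword0 = 0x%08x, dword1 = 0x%08x\n", pkt[0], pkt[1]);
	return 0;
}
```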
@@ -2822,39 +2824,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
     mutex_unlock(&adev->srbm_mutex);
 
     /* Init gfx ring 1 for pipe 1 */
-    mutex_lock(&adev->srbm_mutex);
-    gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
-    ring = &adev->gfx.gfx_ring[1];
-    rb_bufsz = order_base_2(ring->ring_size / 8);
-    tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
-    tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
-    WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-    /* Initialize the ring buffer's write pointers */
-    ring->wptr = 0;
-    WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
-    WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
-    /* Set the wb address whether it's enabled or not */
-    rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
-    WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
-    WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
-                 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
-    wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-    WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
-                 lower_32_bits(wptr_gpu_addr));
-    WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
-                 upper_32_bits(wptr_gpu_addr));
-
-    mdelay(1);
-    WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-
-    rb_addr = ring->gpu_addr >> 8;
-    WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
-    WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
-    WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
-
-    gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
-    mutex_unlock(&adev->srbm_mutex);
+    if (adev->gfx.num_gfx_rings > 1) {
+        mutex_lock(&adev->srbm_mutex);
+        gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+        /* maximum supported gfx ring is 2 */
+        ring = &adev->gfx.gfx_ring[1];
+        rb_bufsz = order_base_2(ring->ring_size / 8);
+        tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
+        tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
+        WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+        /* Initialize the ring buffer's write pointers */
+        ring->wptr = 0;
+        WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
+        WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
+        /* Set the wb address whether it's enabled or not */
+        rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+        WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+        WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
+                     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+        wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+        WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
+                     lower_32_bits(wptr_gpu_addr));
+        WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
+                     upper_32_bits(wptr_gpu_addr));
+
+        mdelay(1);
+        WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+
+        rb_addr = ring->gpu_addr >> 8;
+        WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
+        WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
+        WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
+
+        gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+        mutex_unlock(&adev->srbm_mutex);
+    }
 
     /* Switch to pipe 0 */
     mutex_lock(&adev->srbm_mutex);
     gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
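A worked example for the ring-buffer sizing in the hunk above, assuming ring->ring_size is in bytes: the kernel's order_base_2() rounds up to the next power of two and takes the log2, so a 64 KiB ring gives 65536 / 8 = 8192 and RB_BUFSZ = 13, with RB_BLKSZ = rb_bufsz - 2 = 11. A userspace stand-in for the helper:

```c
/*
 * Stand-in for the kernel's order_base_2(): log2 of the value rounded
 * up to the next power of two. The 64 KiB ring size is illustrative.
 */
#include <stdio.h>

static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)	/* round up to next power of two */
		order++;
	return order;
}

int main(void)
{
	unsigned long ring_size = 64 * 1024;	/* bytes, assumed */
	unsigned int rb_bufsz = order_base_2(ring_size / 8);

	printf("RB_BUFSZ = %u, RB_BLKSZ = %u\n", rb_bufsz, rb_bufsz - 2);
	return 0;
}
```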
@@ -3966,7 +3970,8 @@ static int gfx_v10_0_early_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-    adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
+    adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
+
     adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
 
     gfx_v10_0_set_kiq_pm4_funcs(adev);
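The early_init hunk is where the rename pays off: GFX10_NUM_GFX_RINGS_NV1X is explicitly Navi1x-only, so a later gfx10 part that does use both pipes can pick its own count without disturbing Navi1x. A hypothetical per-ASIC selection (not part of this patch; names and values below are invented for illustration):

```c
/*
 * Hypothetical follow-on: per-ASIC ring-count selection. The "OTHER"
 * define and chip name are invented; only the NV1X value comes from
 * the patch above.
 */
#include <stdio.h>

#define GFX10_NUM_GFX_RINGS_NV1X	1
#define GFX10_NUM_GFX_RINGS_OTHER	2	/* hypothetical ASIC */

enum chip { CHIP_NAVI10, CHIP_NAVI12, CHIP_NAVI14, CHIP_OTHER };

static int pick_num_gfx_rings(enum chip c)
{
	switch (c) {
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
		return GFX10_NUM_GFX_RINGS_NV1X;
	default:
		return GFX10_NUM_GFX_RINGS_OTHER;
	}
}

int main(void)
{
	printf("navi10 gfx rings: %d\n", pick_num_gfx_rings(CHIP_NAVI10));
	return 0;
}
```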