Commit d03f5d59 authored by Alex Deucher's avatar Alex Deucher Committed by Dave Airlie

drm/radeon: fixes for r6xx/r7xx gfx init

- updated swizzle modes for backend map setup
- fix programming of a few gfx regs
- properly handle pipe/backend setup on LE cards
Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 6271901d
...@@ -980,6 +980,9 @@ void r600_gpu_init(struct radeon_device *rdev) ...@@ -980,6 +980,9 @@ void r600_gpu_init(struct radeon_device *rdev)
{ {
u32 tiling_config; u32 tiling_config;
u32 ramcfg; u32 ramcfg;
u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp; u32 tmp;
int i, j; int i, j;
u32 sq_config; u32 sq_config;
...@@ -1076,23 +1079,20 @@ void r600_gpu_init(struct radeon_device *rdev) ...@@ -1076,23 +1079,20 @@ void r600_gpu_init(struct radeon_device *rdev)
switch (rdev->config.r600.max_tile_pipes) { switch (rdev->config.r600.max_tile_pipes) {
case 1: case 1:
tiling_config |= PIPE_TILING(0); tiling_config |= PIPE_TILING(0);
rdev->config.r600.tiling_npipes = 1;
break; break;
case 2: case 2:
tiling_config |= PIPE_TILING(1); tiling_config |= PIPE_TILING(1);
rdev->config.r600.tiling_npipes = 2;
break; break;
case 4: case 4:
tiling_config |= PIPE_TILING(2); tiling_config |= PIPE_TILING(2);
rdev->config.r600.tiling_npipes = 4;
break; break;
case 8: case 8:
tiling_config |= PIPE_TILING(3); tiling_config |= PIPE_TILING(3);
rdev->config.r600.tiling_npipes = 8;
break; break;
default: default:
break; break;
} }
rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE(0); tiling_config |= GROUP_SIZE(0);
...@@ -1106,24 +1106,33 @@ void r600_gpu_init(struct radeon_device *rdev) ...@@ -1106,24 +1106,33 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= SAMPLE_SPLIT(tmp); tiling_config |= SAMPLE_SPLIT(tmp);
} }
tiling_config |= BANK_SWAPS(1); tiling_config |= BANK_SWAPS(1);
tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
rdev->config.r600.max_backends, cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
(0xff << rdev->config.r600.max_backends) & 0xff); cc_rb_backend_disable |=
tiling_config |= BACKEND_MAP(tmp); BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
(R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config); WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
WREG32(CC_RB_BACKEND_DISABLE, tmp);
/* Setup pipes */ /* Setup pipes */
tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK); tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
......
This diff is collapsed.
...@@ -274,9 +274,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) ...@@ -274,9 +274,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
/* /*
* Core functions * Core functions
*/ */
static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_backends, u32 num_tile_pipes,
u32 backend_disable_mask) u32 num_backends,
u32 backend_disable_mask)
{ {
u32 backend_map = 0; u32 backend_map = 0;
u32 enabled_backends_mask; u32 enabled_backends_mask;
...@@ -285,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, ...@@ -285,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES]; u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend; u32 cur_backend;
u32 i; u32 i;
bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES) if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES; num_tile_pipes = R7XX_MAX_PIPES;
...@@ -314,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, ...@@ -314,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends) if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count; num_backends = enabled_backends_count;
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) { switch (num_tile_pipes) {
case 1: case 1:
...@@ -324,49 +338,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, ...@@ -324,49 +338,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1; swizzle_pipe[1] = 1;
break; break;
case 3: case 3:
swizzle_pipe[0] = 0; if (force_no_swizzle) {
swizzle_pipe[1] = 2; swizzle_pipe[0] = 0;
swizzle_pipe[2] = 1; swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
}
break; break;
case 4: case 4:
swizzle_pipe[0] = 0; if (force_no_swizzle) {
swizzle_pipe[1] = 2; swizzle_pipe[0] = 0;
swizzle_pipe[2] = 3; swizzle_pipe[1] = 1;
swizzle_pipe[3] = 1; swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 3;
swizzle_pipe[3] = 1;
}
break; break;
case 5: case 5:
swizzle_pipe[0] = 0; if (force_no_swizzle) {
swizzle_pipe[1] = 2; swizzle_pipe[0] = 0;
swizzle_pipe[2] = 4; swizzle_pipe[1] = 1;
swizzle_pipe[3] = 1; swizzle_pipe[2] = 2;
swizzle_pipe[4] = 3; swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
}
break; break;
case 6: case 6:
swizzle_pipe[0] = 0; if (force_no_swizzle) {
swizzle_pipe[1] = 2; swizzle_pipe[0] = 0;
swizzle_pipe[2] = 4; swizzle_pipe[1] = 1;
swizzle_pipe[3] = 5; swizzle_pipe[2] = 2;
swizzle_pipe[4] = 3; swizzle_pipe[3] = 3;
swizzle_pipe[5] = 1; swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
}
break; break;
case 7: case 7:
swizzle_pipe[0] = 0; if (force_no_swizzle) {
swizzle_pipe[1] = 2; swizzle_pipe[0] = 0;
swizzle_pipe[2] = 4; swizzle_pipe[1] = 1;
swizzle_pipe[3] = 6; swizzle_pipe[2] = 2;
swizzle_pipe[4] = 3; swizzle_pipe[3] = 3;
swizzle_pipe[5] = 1; swizzle_pipe[4] = 4;
swizzle_pipe[6] = 5; swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 5;
}
break; break;
case 8: case 8:
swizzle_pipe[0] = 0; if (force_no_swizzle) {
swizzle_pipe[1] = 2; swizzle_pipe[0] = 0;
swizzle_pipe[2] = 4; swizzle_pipe[1] = 1;
swizzle_pipe[3] = 6; swizzle_pipe[2] = 2;
swizzle_pipe[4] = 3; swizzle_pipe[3] = 3;
swizzle_pipe[5] = 1; swizzle_pipe[4] = 4;
swizzle_pipe[6] = 7; swizzle_pipe[5] = 5;
swizzle_pipe[7] = 5; swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 7;
swizzle_pipe[7] = 5;
}
break; break;
} }
...@@ -386,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, ...@@ -386,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static void rv770_gpu_init(struct radeon_device *rdev) static void rv770_gpu_init(struct radeon_device *rdev)
{ {
int i, j, num_qd_pipes; int i, j, num_qd_pipes;
u32 ta_aux_cntl;
u32 sx_debug_1; u32 sx_debug_1;
u32 smx_dc_ctl0; u32 smx_dc_ctl0;
u32 db_debug3;
u32 num_gs_verts_per_thread; u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es; u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0; u32 gs_prim_buffer_depth = 0;
...@@ -516,24 +583,20 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -516,24 +583,20 @@ static void rv770_gpu_init(struct radeon_device *rdev)
switch (rdev->config.rv770.max_tile_pipes) { switch (rdev->config.rv770.max_tile_pipes) {
case 1: case 1:
default:
gb_tiling_config |= PIPE_TILING(0); gb_tiling_config |= PIPE_TILING(0);
rdev->config.rv770.tiling_npipes = 1;
break; break;
case 2: case 2:
gb_tiling_config |= PIPE_TILING(1); gb_tiling_config |= PIPE_TILING(1);
rdev->config.rv770.tiling_npipes = 2;
break; break;
case 4: case 4:
gb_tiling_config |= PIPE_TILING(2); gb_tiling_config |= PIPE_TILING(2);
rdev->config.rv770.tiling_npipes = 4;
break; break;
case 8: case 8:
gb_tiling_config |= PIPE_TILING(3); gb_tiling_config |= PIPE_TILING(3);
rdev->config.rv770.tiling_npipes = 8;
break;
default:
break; break;
} }
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
if (rdev->family == CHIP_RV770) if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1); gb_tiling_config |= BANK_TILING(1);
...@@ -556,21 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -556,21 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev)
gb_tiling_config |= BANK_SWAPS(1); gb_tiling_config |= BANK_SWAPS(1);
if (rdev->family == CHIP_RV740) cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
backend_map = 0x28; cc_rb_backend_disable |=
else BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
rdev->config.rv770.max_backends,
(0xff << rdev->config.rv770.max_backends) & 0xff);
gb_tiling_config |= BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config = cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |= cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
cc_rb_backend_disable = if (rdev->family == CHIP_RV740)
BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(rdev,
rdev->config.rv770.max_tile_pipes,
(R7XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config); WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
...@@ -578,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -578,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CGTS_SYS_TCC_DISABLE, 0); WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0); WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes = num_qd_pipes =
R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK); R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
...@@ -597,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -597,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | ta_aux_cntl = RREG32(TA_CNTL_AUX);
SYNC_GRADIENT | WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
SYNC_WALKER |
SYNC_ALIGNER));
sx_debug_1 = RREG32(SX_DEBUG_1); sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS; sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
...@@ -611,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -611,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev)
smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1); smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
WREG32(SMX_DC_CTL0, smx_dc_ctl0); WREG32(SMX_DC_CTL0, smx_dc_ctl0);
WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | if (rdev->family != CHIP_RV740)
GS_FLUSH_CTL(4) | WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
ACK_FLUSH_CTL(3) | GS_FLUSH_CTL(4) |
SYNC_FLUSH_CTL)); ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL));
if (rdev->family == CHIP_RV770) db_debug3 = RREG32(DB_DEBUG3);
WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
else { switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV740:
db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
break;
case CHIP_RV710:
case CHIP_RV730:
default:
db_debug3 |= DB_CLK_OFF_DELAY(2);
break;
}
WREG32(DB_DEBUG3, db_debug3);
if (rdev->family != CHIP_RV770) {
db_debug4 = RREG32(DB_DEBUG4); db_debug4 = RREG32(DB_DEBUG4);
db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER; db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
WREG32(DB_DEBUG4, db_debug4); WREG32(DB_DEBUG4, db_debug4);
...@@ -647,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev) ...@@ -647,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev)
ALU_UPDATE_FIFO_HIWATER(0x8)); ALU_UPDATE_FIFO_HIWATER(0x8));
switch (rdev->family) { switch (rdev->family) {
case CHIP_RV770: case CHIP_RV770:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
break;
case CHIP_RV730: case CHIP_RV730:
case CHIP_RV710: case CHIP_RV710:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
break;
case CHIP_RV740: case CHIP_RV740:
default: default:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4); sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment