Commit 9d0cb2c3 authored by Alex Deucher

drm/amdgpu/gfx9.0: convert to IP version checking

Use IP versions rather than asic_type to differentiate
IP version specific features.
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 24be2d70
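
The pattern applied throughout the diff below is a one-to-one mapping from the old CHIP_* enum cases onto the GC (graphics core) IP versions reported by IP discovery. As a rough, self-contained sketch of that mapping (illustrative only, not part of the patch; the IP_VERSION() packing used here is an assumption for demonstration, and the real macro plus the per-device ip_versions[] table come from the amdgpu IP discovery headers):

```c
/*
 * Illustrative only -- not part of the patch.  A minimal sketch of the
 * CHIP_* -> GC IP version mapping this commit applies.  The IP_VERSION()
 * encoding below is assumed for demonstration purposes.
 */
#include <stdio.h>

#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static const char *gc_9_name(unsigned int gc_ver)
{
	switch (gc_ver) {
	case IP_VERSION(9, 0, 1): return "Vega10";
	case IP_VERSION(9, 2, 1): return "Vega12";
	case IP_VERSION(9, 4, 0): return "Vega20";
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2): return "Raven";
	case IP_VERSION(9, 4, 1): return "Arcturus";
	case IP_VERSION(9, 3, 0): return "Renoir";
	case IP_VERSION(9, 4, 2): return "Aldebaran";
	default:                  return "unknown GC 9.x revision";
	}
}

int main(void)
{
	/* e.g. GC 9.4.2 corresponds to the former CHIP_ALDEBARAN checks */
	printf("GC 9.4.2 -> %s\n", gc_9_name(IP_VERSION(9, 4, 2)));
	return 0;
}
```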
@@ -953,8 +953,8 @@ static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_9_0,
 						ARRAY_SIZE(golden_settings_gc_9_0));
@@ -962,7 +962,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 						golden_settings_gc_9_0_vg10,
 						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
 		break;
-	case CHIP_VEGA12:
+	case IP_VERSION(9, 2, 1):
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_9_2_1,
 						ARRAY_SIZE(golden_settings_gc_9_2_1));
@@ -970,7 +970,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 						golden_settings_gc_9_2_1_vg12,
 						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
 		break;
-	case CHIP_VEGA20:
+	case IP_VERSION(9, 4, 0):
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_9_0,
 						ARRAY_SIZE(golden_settings_gc_9_0));
@@ -978,12 +978,13 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 						golden_settings_gc_9_0_vg20,
 						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
 		break;
-	case CHIP_ARCTURUS:
+	case IP_VERSION(9, 4, 1):
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_9_4_1_arct,
 						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
 		break;
-	case CHIP_RAVEN:
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
 		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
 						ARRAY_SIZE(golden_settings_gc_9_1));
 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
@@ -995,12 +996,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 						golden_settings_gc_9_1_rv1,
 						ARRAY_SIZE(golden_settings_gc_9_1_rv1));
 		break;
-	case CHIP_RENOIR:
+	case IP_VERSION(9, 3, 0):
 		soc15_program_register_sequence(adev,
 						golden_settings_gc_9_1_rn,
 						ARRAY_SIZE(golden_settings_gc_9_1_rn));
 		return; /* for renoir, don't need common goldensetting */
-	case CHIP_ALDEBARAN:
+	case IP_VERSION(9, 4, 2):
 		gfx_v9_4_2_init_golden_registers(adev,
 						 adev->smuio.funcs->get_die_id(adev));
 		break;
@@ -1008,8 +1009,8 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
 		break;
 	}
-	if ((adev->asic_type != CHIP_ARCTURUS) &&
-	    (adev->asic_type != CHIP_ALDEBARAN))
+	if ((adev->ip_versions[GC_HWIP] != IP_VERSION(9, 4, 1)) &&
+	    (adev->ip_versions[GC_HWIP] != IP_VERSION(9, 4, 2)))
 		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
 						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
 }
@@ -1193,15 +1194,15 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 	adev->gfx.me_fw_write_wait = false;
 	adev->gfx.mec_fw_write_wait = false;
-	if ((adev->asic_type != CHIP_ARCTURUS) &&
+	if ((adev->ip_versions[GC_HWIP] != IP_VERSION(9, 4, 1)) &&
 	    ((adev->gfx.mec_fw_version < 0x000001a5) ||
 	    (adev->gfx.mec_feature_version < 46) ||
 	    (adev->gfx.pfp_fw_version < 0x000000b7) ||
 	    (adev->gfx.pfp_feature_version < 46)))
 		DRM_WARN_ONCE("CP firmware version too old, please update!");
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
 		    (adev->gfx.me_feature_version >= 42) &&
 		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
@@ -1212,7 +1213,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 		    (adev->gfx.mec_feature_version >= 42))
 			adev->gfx.mec_fw_write_wait = true;
 		break;
-	case CHIP_VEGA12:
+	case IP_VERSION(9, 2, 1):
 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
 		    (adev->gfx.me_feature_version >= 44) &&
 		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
@@ -1223,7 +1224,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 		    (adev->gfx.mec_feature_version >= 44))
 			adev->gfx.mec_fw_write_wait = true;
 		break;
-	case CHIP_VEGA20:
+	case IP_VERSION(9, 4, 0):
 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
 		    (adev->gfx.me_feature_version >= 44) &&
 		    (adev->gfx.pfp_fw_version >= 0x000000b2) &&
@@ -1234,7 +1235,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
 		    (adev->gfx.mec_feature_version >= 44))
 			adev->gfx.mec_fw_write_wait = true;
 		break;
-	case CHIP_RAVEN:
+	case IP_VERSION(9, 1, 0):
+	case IP_VERSION(9, 2, 2):
 		if ((adev->gfx.me_fw_version >= 0x0000009c) &&
 		    (adev->gfx.me_feature_version >= 42) &&
 		    (adev->gfx.pfp_fw_version >= 0x000000b1) &&
@@ -1297,7 +1299,7 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
 static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
 {
-	if ((adev->asic_type == CHIP_RENOIR) &&
+	if ((adev->ip_versions[GC_HWIP] == IP_VERSION(9, 3, 0)) &&
 	    (adev->gfx.me_fw_version >= 0x000000a5) &&
 	    (adev->gfx.me_feature_version >= 52))
 		return true;
@@ -1310,12 +1312,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 2, 1):
+	case IP_VERSION(9, 4, 0):
 		break;
-	case CHIP_RAVEN:
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
 		if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
 		      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
 		    ((!is_raven_kicker(adev) &&
@@ -1329,7 +1332,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 				AMD_PG_SUPPORT_CP |
 				AMD_PG_SUPPORT_RLC_SMU_HS;
 		break;
-	case CHIP_RENOIR:
+	case IP_VERSION(9, 3, 0):
 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
 				AMD_PG_SUPPORT_CP |
@@ -1553,9 +1556,9 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
 {
-	if (adev->asic_type == CHIP_ALDEBARAN ||
-	    adev->asic_type == CHIP_ARCTURUS ||
-	    adev->asic_type == CHIP_RENOIR)
+	if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 4, 2) ||
+	    adev->ip_versions[GC_HWIP] == IP_VERSION(9, 4, 1) ||
+	    adev->ip_versions[GC_HWIP] == IP_VERSION(9, 3, 0))
 		return false;
 	return true;
@@ -1663,17 +1666,18 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 	DRM_DEBUG("\n");
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
 		chip_name = "vega10";
 		break;
-	case CHIP_VEGA12:
+	case IP_VERSION(9, 2, 1):
 		chip_name = "vega12";
 		break;
-	case CHIP_VEGA20:
+	case IP_VERSION(9, 4, 0):
 		chip_name = "vega20";
 		break;
-	case CHIP_RAVEN:
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
 			chip_name = "raven2";
 		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
@@ -1681,16 +1685,16 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 		else
 			chip_name = "raven";
 		break;
-	case CHIP_ARCTURUS:
+	case IP_VERSION(9, 4, 1):
 		chip_name = "arcturus";
 		break;
-	case CHIP_RENOIR:
+	case IP_VERSION(9, 3, 0):
 		if (adev->apu_flags & AMD_APU_IS_RENOIR)
 			chip_name = "renoir";
 		else
 			chip_name = "green_sardine";
 		break;
-	case CHIP_ALDEBARAN:
+	case IP_VERSION(9, 4, 2):
 		chip_name = "aldebaran";
 		break;
 	default:
@@ -1794,7 +1798,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		always_on_cu_num = 4;
-	else if (adev->asic_type == CHIP_VEGA12)
+	else if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 2, 1))
 		always_on_cu_num = 8;
 	else
 		always_on_cu_num = 12;
@@ -1963,11 +1967,12 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 			return r;
 	}
-	switch (adev->asic_type) {
-	case CHIP_RAVEN:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
 		gfx_v9_0_init_lbpw(adev);
 		break;
-	case CHIP_VEGA20:
+	case IP_VERSION(9, 4, 0):
 		gfx_v9_4_init_lbpw(adev);
 		break;
 	default:
@@ -2142,8 +2147,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2151,7 +2156,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
 		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
 		break;
-	case CHIP_VEGA12:
+	case IP_VERSION(9, 2, 1):
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2160,7 +2165,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
 		DRM_INFO("fix gfx.config for vega12\n");
 		break;
-	case CHIP_VEGA20:
+	case IP_VERSION(9, 4, 0):
 		adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2175,7 +2180,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		if (err)
 			return err;
 		break;
-	case CHIP_RAVEN:
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2186,7 +2192,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		else
 			gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
 		break;
-	case CHIP_ARCTURUS:
+	case IP_VERSION(9, 4, 1):
 		adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2197,7 +2203,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		gb_addr_config &= ~0xf3e777ff;
 		gb_addr_config |= 0x22014042;
 		break;
-	case CHIP_RENOIR:
+	case IP_VERSION(9, 3, 0):
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
 		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2207,7 +2213,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 		gb_addr_config &= ~0xf3e777ff;
 		gb_addr_config |= 0x22010042;
 		break;
-	case CHIP_ALDEBARAN:
+	case IP_VERSION(9, 4, 2):
 		adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
 		adev->gfx.config.max_hw_contexts = 8;
 		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2305,14 +2311,15 @@ static int gfx_v9_0_sw_init(void *handle)
 	struct amdgpu_kiq *kiq;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
-	case CHIP_RENOIR:
-	case CHIP_ALDEBARAN:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 2, 1):
+	case IP_VERSION(9, 4, 0):
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
+	case IP_VERSION(9, 4, 1):
+	case IP_VERSION(9, 3, 0):
+	case IP_VERSION(9, 4, 2):
 		adev->gfx.mec.num_mec = 2;
 		break;
 	default:
@@ -2596,8 +2603,8 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
 {
 	uint32_t tmp;
-	switch (adev->asic_type) {
-	case CHIP_ARCTURUS:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 4, 1):
 		tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
 		tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
 				    DISABLE_BARRIER_WAITCNT, 1);
@@ -2932,7 +2939,7 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
 		/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
 		data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
-		if (adev->asic_type != CHIP_RENOIR)
+		if (adev->ip_versions[GC_HWIP] != IP_VERSION(9, 3, 0))
 			pwr_10_0_gfxip_control_over_cgpg(adev, true);
 	}
 }
@@ -3044,7 +3051,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 	 * And it's needed by gfxoff feature.
 	 */
 	if (adev->gfx.rlc.is_rlc_v2_1) {
-		if (adev->asic_type == CHIP_VEGA12 ||
+		if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 2, 1) ||
 		    (adev->apu_flags & AMD_APU_IS_RAVEN2))
 			gfx_v9_1_init_rlc_save_restore_list(adev);
 		gfx_v9_0_enable_save_restore_machine(adev);
@@ -3157,14 +3164,15 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 			return r;
 	}
-	switch (adev->asic_type) {
-	case CHIP_RAVEN:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
 		if (amdgpu_lbpw == 0)
 			gfx_v9_0_enable_lbpw(adev, false);
 		else
 			gfx_v9_0_enable_lbpw(adev, true);
 		break;
-	case CHIP_VEGA20:
+	case IP_VERSION(9, 4, 0):
 		if (amdgpu_lbpw > 0)
 			gfx_v9_0_enable_lbpw(adev, true);
 		else
@@ -3959,8 +3967,8 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
 {
 	u32 tmp;
-	if (adev->asic_type != CHIP_ARCTURUS &&
-	    adev->asic_type != CHIP_ALDEBARAN)
+	if (adev->ip_versions[GC_HWIP] != IP_VERSION(9, 4, 1) &&
+	    adev->ip_versions[GC_HWIP] != IP_VERSION(9, 4, 2))
 		return;
 	tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
@@ -4000,7 +4008,7 @@ static int gfx_v9_0_hw_init(void *handle)
 	if (r)
 		return r;
-	if (adev->asic_type == CHIP_ALDEBARAN)
+	if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 4, 2))
 		gfx_v9_4_2_set_power_brake_sequence(adev);
 	return r;
@@ -4232,7 +4240,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 	amdgpu_gfx_off_ctrl(adev, false);
 	mutex_lock(&adev->gfx.gpu_clock_mutex);
-	if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+	if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
 		clock = gfx_v9_0_kiq_read_clock(adev);
 	} else {
 		WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
@@ -4582,7 +4590,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	if (!ring->sched.ready)
 		return 0;
-	if (adev->asic_type == CHIP_ARCTURUS) {
+	if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 4, 1)) {
 		vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
 		vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
 		vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
@@ -4732,8 +4740,8 @@ static int gfx_v9_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (adev->asic_type == CHIP_ARCTURUS ||
-	    adev->asic_type == CHIP_ALDEBARAN)
+	if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 4, 1) ||
+	    adev->ip_versions[GC_HWIP] == IP_VERSION(9, 4, 2))
 		adev->gfx.num_gfx_rings = 0;
 	else
 		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
@@ -4767,7 +4775,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
 	}
 	/* requires IBs so do in late init after IB pool is initialized */
-	if (adev->asic_type == CHIP_ALDEBARAN)
+	if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 4, 2))
 		r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
 	else
 		r = gfx_v9_0_do_edc_gpr_workarounds(adev);
@@ -4895,7 +4903,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
-		if (adev->asic_type != CHIP_VEGA12)
+		if (adev->ip_versions[GC_HWIP] != IP_VERSION(9, 2, 1))
 			data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4929,7 +4937,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 		/* 1 - MGCG_OVERRIDE */
 		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
-		if (adev->asic_type != CHIP_VEGA12)
+		if (adev->ip_versions[GC_HWIP] != IP_VERSION(9, 2, 1))
 			data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
@@ -5035,7 +5043,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 		/* enable cgcg FSM(0x0000363F) */
 		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
-		if (adev->asic_type == CHIP_ARCTURUS)
+		if (adev->ip_versions[GC_HWIP] == IP_VERSION(9, 4, 1))
 			data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
 				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
 		else
@@ -5161,9 +5169,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	bool enable = (state == AMD_PG_STATE_GATE);
-	switch (adev->asic_type) {
-	case CHIP_RAVEN:
-	case CHIP_RENOIR:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
+	case IP_VERSION(9, 3, 0):
 		if (!enable)
 			amdgpu_gfx_off_ctrl(adev, false);
@@ -5189,7 +5198,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
 		if (enable)
 			amdgpu_gfx_off_ctrl(adev, true);
 		break;
-	case CHIP_VEGA12:
+	case IP_VERSION(9, 2, 1):
 		amdgpu_gfx_off_ctrl(adev, enable);
 		break;
 	default:
@@ -5207,14 +5216,15 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
 	if (amdgpu_sriov_vf(adev))
 		return 0;
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
-	case CHIP_RENOIR:
-	case CHIP_ALDEBARAN:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 2, 1):
+	case IP_VERSION(9, 4, 0):
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
+	case IP_VERSION(9, 4, 1):
+	case IP_VERSION(9, 3, 0):
+	case IP_VERSION(9, 4, 2):
 		gfx_v9_0_update_gfx_clock_gating(adev,
 						 state == AMD_CG_STATE_GATE);
 		break;
@@ -5256,7 +5266,7 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
-	if (adev->asic_type != CHIP_ARCTURUS) {
+	if (adev->ip_versions[GC_HWIP] != IP_VERSION(9, 4, 1)) {
 		/* AMD_CG_SUPPORT_GFX_3D_CGCG */
 		data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
 		if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
@@ -7027,14 +7037,15 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
-	case CHIP_RENOIR:
-	case CHIP_ALDEBARAN:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 2, 1):
+	case IP_VERSION(9, 4, 0):
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
+	case IP_VERSION(9, 4, 1):
+	case IP_VERSION(9, 3, 0):
+	case IP_VERSION(9, 4, 2):
 		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
 		break;
 	default:
@@ -7045,17 +7056,18 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
 {
 	/* init asci gds info */
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
-	case CHIP_VEGA20:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 2, 1):
+	case IP_VERSION(9, 4, 0):
 		adev->gds.gds_size = 0x10000;
 		break;
-	case CHIP_RAVEN:
-	case CHIP_ARCTURUS:
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
+	case IP_VERSION(9, 4, 1):
 		adev->gds.gds_size = 0x1000;
 		break;
-	case CHIP_ALDEBARAN:
+	case IP_VERSION(9, 4, 2):
 		/* aldebaran removed all the GDS internal memory,
 		 * only support GWS opcode in kernel, like barrier
 		 * semaphore.etc */
@@ -7066,24 +7078,25 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
 		break;
 	}
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA20:
+	switch (adev->ip_versions[GC_HWIP]) {
+	case IP_VERSION(9, 0, 1):
+	case IP_VERSION(9, 4, 0):
 		adev->gds.gds_compute_max_wave_id = 0x7ff;
 		break;
-	case CHIP_VEGA12:
+	case IP_VERSION(9, 2, 1):
 		adev->gds.gds_compute_max_wave_id = 0x27f;
 		break;
-	case CHIP_RAVEN:
+	case IP_VERSION(9, 2, 2):
+	case IP_VERSION(9, 1, 0):
 		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
 			adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
 		else
			adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
 		break;
-	case CHIP_ARCTURUS:
+	case IP_VERSION(9, 4, 1):
 		adev->gds.gds_compute_max_wave_id = 0xfff;
 		break;
-	case CHIP_ALDEBARAN:
+	case IP_VERSION(9, 4, 2):
 		/* deprecated for Aldebaran, no usage at all */
 		adev->gds.gds_compute_max_wave_id = 0;
 		break;