Commit 68c60b34 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.5-2023-08-16' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.5-2023-08-16:

amdgpu:
- SMU 13.x fixes
- Fix mcbp parameter for gfx9
- SMU 11.x fixes
- Temporary fix for large numbers of XCP partitions
- S0ix fixes
- DCN 2.0 fix
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230816200226.10771-1-alexander.deucher@amd.com
parents be48306f 6ecc1029
@@ -3722,10 +3722,11 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
 {
 	if (amdgpu_mcbp == 1)
 		adev->gfx.mcbp = true;
-
-	if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
-	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
-	    adev->gfx.num_gfx_rings)
+	else if (amdgpu_mcbp == 0)
+		adev->gfx.mcbp = false;
+	else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
+		 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
+		 adev->gfx.num_gfx_rings)
 		adev->gfx.mcbp = true;
 
 	if (amdgpu_sriov_vf(adev))
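The hunk above adds an explicit "disabled" branch: previously, gfx9 parts with gfx rings re-enabled mid-command-buffer preemption (MCBP) even when the user had asked for it to be off. Below is a minimal, self-contained sketch of the resulting decision logic; it assumes the usual module-parameter semantics of -1 = auto (default), 0 = off, 1 = on, and uses is_gfx9/has_gfx_rings as stand-ins for the driver's IP-version and ring checks.

	#include <stdbool.h>

	/* Sketch only: mirrors the if/else-if chain after this fix.
	 * mcbp_param stands in for the amdgpu.mcbp module parameter
	 * (assumed: -1 auto, 0 force off, 1 force on). */
	static bool mcbp_enabled(int mcbp_param, bool is_gfx9, bool has_gfx_rings)
	{
		if (mcbp_param == 1)
			return true;              /* explicit enable */
		if (mcbp_param == 0)
			return false;             /* explicit disable is no longer overridden */
		return is_gfx9 && has_gfx_rings;  /* auto: enable on gfx9 with gfx rings */
	}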
@@ -4393,6 +4394,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
 
 	cancel_delayed_work_sync(&adev->delayed_init_work);
+	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
 
 	amdgpu_ras_suspend(adev);
...
@@ -551,6 +551,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+/**
+ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
+ * fence driver interrupts need to be restored.
+ *
+ * @ring: ring that to be checked
+ *
+ * Interrupts for rings that belong to GFX IP don't need to be restored
+ * when the target power state is s0ix.
+ *
+ * Return true if need to restore interrupts, false otherwise.
+ */
+static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool is_gfx_power_domain = false;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_SDMA:
+		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+		if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
+			is_gfx_power_domain = true;
+		break;
+	case AMDGPU_RING_TYPE_GFX:
+	case AMDGPU_RING_TYPE_COMPUTE:
+	case AMDGPU_RING_TYPE_KIQ:
+	case AMDGPU_RING_TYPE_MES:
+		is_gfx_power_domain = true;
+		break;
+	default:
+		break;
+	}
+
+	return !(adev->in_s0ix && is_gfx_power_domain);
+}
+
 /**
  * amdgpu_fence_driver_hw_fini - tear down the fence driver
  * for all possible rings.
@@ -579,7 +614,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 		amdgpu_fence_driver_force_completion(ring);
 
 		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
-		    ring->fence_drv.irq_src)
+		    ring->fence_drv.irq_src &&
+		    amdgpu_fence_need_ring_interrupt_restore(ring))
 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 
@@ -655,7 +691,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 			continue;
 
 		/* enable the interrupt */
-		if (ring->fence_drv.irq_src)
+		if (ring->fence_drv.irq_src &&
+		    amdgpu_fence_need_ring_interrupt_restore(ring))
 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 	}
...
@@ -692,15 +692,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 		if (adev->gfx.gfx_off_req_count == 0 &&
 		    !adev->gfx.gfx_off_state) {
-			/* If going to s2idle, no need to wait */
-			if (adev->in_s0ix) {
-				if (!amdgpu_dpm_set_powergating_by_smu(adev,
-						AMD_IP_BLOCK_TYPE_GFX, true))
-					adev->gfx.gfx_off_state = true;
-			} else {
-				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
-						      delay);
-			}
+			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+					      delay);
 		}
 	} else {
 		if (adev->gfx.gfx_off_req_count == 0) {
...
@@ -397,7 +397,7 @@ void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
 	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
 
 	WARN_ON(!ring->is_sw_ring);
-	if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
+	if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
 		if (amdgpu_mcbp_scan(mux) > 0)
 			amdgpu_mcbp_trigger_preempt(mux);
 		return;
...
@@ -239,8 +239,13 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
 	for (i = 1; i < MAX_XCP; i++) {
 		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
-		if (ret)
+		if (ret == -ENOSPC) {
+			dev_warn(adev->dev,
+			"Skip xcp node #%d when out of drm node resource.", i);
+			return 0;
+		} else if (ret) {
 			return ret;
+		}
 
 		/* Redirect all IOCTLs to the primary device */
 		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
@@ -328,6 +333,9 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
 		return 0;
 
 	for (i = 1; i < MAX_XCP; i++) {
+		if (!adev->xcp_mgr->xcp[i].ddev)
+			break;
+
 		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
 		if (ret)
 			return ret;
@@ -345,6 +353,9 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
 		return;
 
 	for (i = 1; i < MAX_XCP; i++) {
+		if (!adev->xcp_mgr->xcp[i].ddev)
+			break;
+
 		p_ddev = adev->xcp_mgr->xcp[i].ddev;
 		drm_dev_unplug(p_ddev);
 		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
...
@@ -1965,7 +1965,14 @@ int kfd_topology_add_device(struct kfd_node *gpu)
 	const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
 
 	gpu_id = kfd_generate_gpu_id(gpu);
-	pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+	if (gpu->xcp && !gpu->xcp->ddev) {
+		dev_warn(gpu->adev->dev,
+		"Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.",
+		gpu_id);
+		return 0;
+	} else {
+		pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+	}
 
 	/* Check to see if this gpu device exists in the topology_device_list.
 	 * If so, assign the gpu to that device,
...
@@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.timing_trace = false,
 		.clock_trace = true,
 		.disable_pplib_clock_request = true,
-		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
+		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
 		.force_single_disp_pipe_split = false,
 		.disable_dcc = DCC_ENABLE,
 		.vsr_support = true,
...
@@ -588,7 +588,9 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
 	return -ENOMEM;
 }
 
-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
+static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
+							    bool use_metrics_v3,
+							    bool use_metrics_v2)
 {
 	struct smu_table_context *smu_table= &smu->smu_table;
 	SmuMetricsExternal_t *metrics_ext =
@@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
 	uint32_t throttler_status = 0;
 	int i;
 
-	if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-	     (smu->smc_fw_version >= 0x3A4900)) {
+	if (use_metrics_v3) {
 		for (i = 0; i < THROTTLER_COUNT; i++)
 			throttler_status |=
 				(metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
-	} else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-	     (smu->smc_fw_version >= 0x3A4300)) {
+	} else if (use_metrics_v2) {
 		for (i = 0; i < THROTTLER_COUNT; i++)
 			throttler_status |=
 				(metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
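With this refactor the firmware-version checks move out of the throttler helper; callers decide which metrics layout is in use and pass use_metrics_v3/use_metrics_v2 in. Below is a sketch of how those flags can be derived, based only on the checks removed above (MP1 11.0.7 with SMC firmware >= 0x3A4900 selects the v3 layout, >= 0x3A4300 the v2 layout); the actual caller-side code is not part of this hunk.

	/* Sketch, not the driver's exact code: derive the metrics-layout flags once
	 * from the version checks that used to live inside the helper. */
	static void pick_metrics_version(struct smu_context *smu,
					 bool *use_metrics_v3, bool *use_metrics_v2)
	{
		bool is_mp1_11_0_7 = smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7);

		*use_metrics_v3 = is_mp1_11_0_7 && smu->smc_fw_version >= 0x3A4900;
		*use_metrics_v2 = is_mp1_11_0_7 && smu->smc_fw_version >= 0x3A4300;
	}

When both conditions hold, the helper's if/else-if still tries the v3 path first, preserving the original precedence.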
@@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
 			metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	case METRICS_THROTTLER_STATUS:
-		*value = sienna_cichlid_get_throttler_status_locked(smu);
+		*value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
 		break;
 	case METRICS_CURR_FANSPEED:
 		*value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
@@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 	gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
 		use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
 
-	gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
+	gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
 	gpu_metrics->indep_throttle_status =
 			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
 							   sienna_cichlid_throttler_map);
...
@@ -331,6 +331,7 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
 	struct smu_13_0_0_powerplay_table *powerplay_table =
 		table_context->power_play_table;
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
+	PPTable_t *pptable = smu->smu_table.driver_pptable;
 #if 0
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	const OverDriveLimits_t * const overdrive_upperlimits =
@@ -371,6 +372,9 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
 	table_context->thermal_controller_type =
 		powerplay_table->thermal_controller_type;
 
+	smu->adev->pm.no_fan =
+		!(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
+
 	return 0;
 }
...
@@ -81,9 +81,10 @@
 #define EPSILON 1
 
 #define smnPCIE_ESM_CTRL 0x193D0
-#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1ab40288
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
+#define MAX_LINK_WIDTH 6
 
 static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
 	MSG_MAP(TestMessage,		PPSMC_MSG_TestMessage,		0),
@@ -708,16 +709,19 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
 		*value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
 		break;
 	case METRICS_TEMPERATURE_HOTSPOT:
-		*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
+		*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) *
+			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	case METRICS_TEMPERATURE_MEM:
-		*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
+		*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) *
+			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	/* This is the max of all VRs and not just SOC VR.
 	 * No need to define another data type for the same.
 	 */
 	case METRICS_TEMPERATURE_VRSOC:
-		*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature);
+		*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) *
+			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	default:
 		*value = UINT_MAX;
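The temperature fix above matters because the SMU reports these readings as Q10 fixed-point degrees Celsius, while callers expect the scaled units the rest of the stack (e.g. hwmon) uses. A worked sketch of the conversion, under two assumptions that are not visible in this diff: SMUQ10_TO_UINT() drops the 10 fractional bits, and SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (i.e. millidegrees Celsius).

	/* Sketch with assumed macro values (see above); e.g. a raw Q10 reading of
	 * 0xF400 is 61.0 degC, which becomes 61 * 1000 = 61000 millidegrees. */
	#define SKETCH_SMUQ10_TO_UINT(x)  ((x) >> 10)
	#define SKETCH_UNITS_PER_DEG_C    1000

	static unsigned int sketch_temp_millideg(unsigned long long q10_deg_c)
	{
		return SKETCH_SMUQ10_TO_UINT(q10_deg_c) * SKETCH_UNITS_PER_DEG_C;
	}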
@@ -1966,6 +1970,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0, inst0, xcc0;
 	MetricsTable_t *metrics;
+	u16 link_width_level;
 
 	inst0 = adev->sdma.instance[0].aid_id;
 	xcc0 = GET_INST(GC, 0);
@@ -2016,8 +2021,12 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
 	gpu_metrics->throttle_status = 0;
 
 	if (!(adev->flags & AMD_IS_APU)) {
+		link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
+		if (link_width_level > MAX_LINK_WIDTH)
+			link_width_level = 0;
+
 		gpu_metrics->pcie_link_width =
-			smu_v13_0_6_get_current_pcie_link_width_level(smu);
+			DECODE_LANE_WIDTH(link_width_level);
 		gpu_metrics->pcie_link_speed =
 			smu_v13_0_6_get_current_pcie_link_speed(smu);
 	}
...