Commit 13aa38f8 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.4-2023-05-24' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.4-2023-05-24:

amdgpu:
- Fix missing BO unlocking in KIQ error path
- Avoid spurious secure display error messages
- SMU13 fix
- Fix an OD regression
- GPU reset display IRQ warning fix
- MST fix

radeon:
- Fix a DP regression
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230524211238.7749-1-alexander.deucher@amd.com
parents 94d39d01 482e6ad9
@@ -6892,8 +6892,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
 		return r;
 
 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-	if (unlikely(r != 0))
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(ring->mqd_obj);
 		return r;
+	}
 
 	gfx_v10_0_kiq_init_queue(ring);
 	amdgpu_bo_kunmap(ring->mqd_obj);
...
@@ -3617,8 +3617,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
 		return r;
 
 	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-	if (unlikely(r != 0))
+	if (unlikely(r != 0)) {
+		amdgpu_bo_unreserve(ring->mqd_obj);
 		return r;
+	}
 
 	gfx_v9_0_kiq_init_queue(ring);
 	amdgpu_bo_kunmap(ring->mqd_obj);
...
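Both KIQ hunks above apply the same one-line rule: once amdgpu_bo_reserve() has succeeded, every early return has to drop the reservation before bailing out. A minimal sketch of the full pairing, assuming the surrounding kiq_resume code follows the usual reserve/kmap/kunmap/unreserve sequence (an illustration, not the literal driver code):

	/* Sketch: pair every successful amdgpu_bo_reserve() with an unreserve
	 * on all exit paths, including the kmap failure path fixed above. */
	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;                              /* nothing held yet */

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);    /* the previously missing unlock */
		return r;
	}

	/* ... program the MQD through ring->mqd_ptr ... */

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);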
@@ -57,7 +57,13 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
 	if (err)
 		return err;
 
-	return psp_init_ta_microcode(psp, ucode_prefix);
+	err = psp_init_ta_microcode(psp, ucode_prefix);
+	if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) &&
+	    (adev->pdev->revision == 0xa1) &&
+	    (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) {
+		adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
+	}
+	return err;
 }
 
 static int psp_v10_0_ring_create(struct psp_context *psp,
...
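The intent of the psp_v10_0 change is to keep the secure display TA from being loaded on this specific Raven revision once the TA firmware is new enough, which is what silences the spurious error messages. A hedged sketch of the assumed consumer behaviour (the actual guard lives in the PSP securedisplay init path; treat this purely as an illustration):

	/* Assumed check in the securedisplay TA init path: an empty firmware
	 * descriptor means "not available", so the TA is never started. */
	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
	    !psp->securedisplay_context.context.bin_desc.start_addr) {
		dev_info(psp->adev->dev, "SECUREDISPLAY: ta ucode is not available\n");
		return 0;
	}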
@@ -2479,20 +2479,25 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
 		if (acrtc && state->stream_status[i].plane_count != 0) {
 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
-			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
-				      acrtc->crtc_id, enable ? "en" : "dis", rc);
 			if (rc)
 				DRM_WARN("Failed to %s pflip interrupts\n",
 					 enable ? "enable" : "disable");
 
 			if (enable) {
-				rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
-				if (rc)
-					DRM_WARN("Failed to enable vblank interrupts\n");
-			} else {
-				amdgpu_dm_crtc_disable_vblank(&acrtc->base);
-			}
+				if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+					rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+			} else
+				rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+
+			if (rc)
+				DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
 
+			irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+			/* During gpu-reset we disable and then enable vblank irq, so
+			 * don't use amdgpu_irq_get/put() to avoid refcount change.
+			 */
+			if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+				DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
 		}
 	}
@@ -2852,7 +2857,7 @@ static int dm_resume(void *handle)
 		 * this is the case when traversing through already created
 		 * MST connectors, should be skipped
 		 */
-		if (aconnector->dc_link->type == dc_connection_mst_branch)
+		if (aconnector && aconnector->mst_root)
 			continue;
 
 		mutex_lock(&aconnector->hpd_lock);
@@ -6737,7 +6742,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
 	int clock, bpp = 0;
 	bool is_y420 = false;
 
-	if (!aconnector->mst_output_port || !aconnector->dc_sink)
+	if (!aconnector->mst_output_port)
 		return 0;
 
 	mst_port = aconnector->mst_output_port;
...
@@ -146,7 +146,6 @@ static void vblank_control_worker(struct work_struct *work)
 
 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 {
-	enum dc_irq_source irq_source;
 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
@@ -169,18 +168,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	if (rc)
 		return rc;
 
-	if (amdgpu_in_reset(adev)) {
-		irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
-		/* During gpu-reset we disable and then enable vblank irq, so
-		 * don't use amdgpu_irq_get/put() to avoid refcount change.
-		 */
-		if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
-			rc = -EBUSY;
-	} else {
-		rc = (enable)
-			? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
-			: amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
-	}
+	rc = (enable)
+		? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
+		: amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
 
 	if (rc)
 		return rc;
...
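Read together, the two display hunks move the gpu-reset special case out of dm_set_vblank() and into dm_gpureset_toggle_interrupts(): during reset the DC interrupt source is flipped directly, so the vblank refcount managed by amdgpu_irq_get()/amdgpu_irq_put() is never touched. A condensed sketch of the resulting split, with names shortened and not the literal driver code:

	/* gpu-reset path (dm_gpureset_toggle_interrupts): toggle the DC irq
	 * source directly; no refcounted amdgpu_irq_* helpers involved. */
	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");

	/* normal path (dm_set_vblank): keep using the refcounted helpers. */
	rc = enable ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
		    : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);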
@@ -871,14 +871,12 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	}
 	if (ret == -ENOENT) {
 		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
-		if (size > 0) {
-			size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
-			size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
-			size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
-			size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
-			size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
-		}
+		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
+		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
+		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
+		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
+		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
 	}
 
 	if (size == 0)
 		size = sysfs_emit(buf, "\n");
...
@@ -125,6 +125,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
 	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
 	MSG_MAP(AllowGpo,			PPSMC_MSG_SetGpoAllow,			0),
 	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
+	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
...
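Adding the MSG_MAP entry is what lets the common SMU helpers translate the generic NotifyPowerSource message into the SMU 13.0.7 PPSMC opcode; without the mapping the lookup fails and AC/DC power-source notifications error out. A hedged usage sketch, assuming the smu_cmn_send_smc_msg_with_param() helper used for the other SMU13 messages:

	/* Sketch: with the mapping in place this resolves to
	 * PPSMC_MSG_NotifyPowerSource instead of failing as an unmapped
	 * message (power_src is SMU_POWER_SOURCE_AC or _DC). */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NotifyPowerSource,
					      power_src, NULL);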
@@ -99,6 +99,16 @@ static void radeon_hotplug_work_func(struct work_struct *work)
 
 static void radeon_dp_work_func(struct work_struct *work)
 {
+	struct radeon_device *rdev = container_of(work, struct radeon_device,
+						  dp_work);
+	struct drm_device *dev = rdev->ddev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	mutex_lock(&mode_config->mutex);
+	list_for_each_entry(connector, &mode_config->connector_list, head)
+		radeon_connector_hotplug(connector);
+	mutex_unlock(&mode_config->mutex);
 }
 
 /**
...