Commit 2e4e9de1 authored by Daniel Vetter


Merge tag 'amd-drm-fixes-6.3-2023-03-23' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.3-2023-03-23:

amdgpu:
- S4 fix
- Soft reset fixes
- SR-IOV fix
- Remove an out of date comment in the DC code
- ASPM fix
- DCN 3.2 fixes
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230323161939.7751-1-alexander.deucher@amd.com
parents e37fef79 f9537b1f
@@ -1272,6 +1272,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+bool amdgpu_device_aspm_support_quirk(void);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes);
@@ -1391,10 +1392,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
 void amdgpu_acpi_detect(void);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
 static inline void amdgpu_acpi_detect(void) { }
 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1405,11 +1408,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
 #else
 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
-static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
 #endif
...
@@ -971,6 +971,29 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
         return true;
 }
+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if should reset GPU, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+        if (adev->flags & AMD_IS_APU)
+                return false;
+        if (amdgpu_sriov_vf(adev))
+                return false;
+#if IS_ENABLED(CONFIG_SUSPEND)
+        return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+#else
+        return true;
+#endif
+}
 /*
  * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
  *
@@ -1042,24 +1065,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
                 (pm_suspend_target_state == PM_SUSPEND_MEM);
 }
-/**
- * amdgpu_acpi_should_gpu_reset
- *
- * @adev: amdgpu_device_pointer
- *
- * returns true if should reset GPU, false if not
- */
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
-{
-        if (adev->flags & AMD_IS_APU)
-                return false;
-        if (amdgpu_sriov_vf(adev))
-                return false;
-        return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
-}
 /**
  * amdgpu_acpi_is_s0ix_active
  *
...
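For context, a minimal usage sketch of the amdgpu_acpi_should_gpu_reset() helper added above; it simply mirrors the amdgpu_pmops_freeze() hunk further down in this diff, and the wrapper function name here is illustrative, not part of the patch.

/* Illustrative sketch only: gate the post-freeze ASIC reset on the ACPI
 * helper. APUs, SR-IOV VFs and suspend-to-idle targets skip the reset;
 * everything else performs a full ASIC reset before hibernation.
 */
static int example_pm_freeze(struct amdgpu_device *adev)
{
        if (amdgpu_acpi_should_gpu_reset(adev))
                return amdgpu_asic_reset(adev);

        return 0;
}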
@@ -80,6 +80,10 @@
 #include <drm/drm_drv.h>
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -1356,6 +1360,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
         return pcie_aspm_enabled(adev->pdev);
 }
+bool amdgpu_device_aspm_support_quirk(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+        struct cpuinfo_x86 *c = &cpu_data(0);
+        return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+        return true;
+#endif
+}
 /* if we get transitioned to only one device, take VGA back */
 /**
  * amdgpu_device_vga_set_decode - enable/disable vga decode
...
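A minimal usage sketch of the amdgpu_device_aspm_support_quirk() helper added above, which hoists the Alder Lake ASPM quirk out of vi.c so other SOC files can share it; it mirrors the nv.c and vi.c hunks later in this diff, and the wrapper function name is illustrative only.

/* Illustrative sketch only: a SOC-level ASPM programming path bails out when
 * the generic ASPM policy check fails or when the Alder Lake quirk applies.
 */
static void example_program_aspm(struct amdgpu_device *adev)
{
        if (!amdgpu_device_should_use_aspm(adev) ||
            !amdgpu_device_aspm_support_quirk())
                return;

        /* ... program the PCIE_LC_* ASPM registers here ... */
}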
@@ -2467,7 +2467,10 @@ static int amdgpu_pmops_freeze(struct device *dev)
         adev->in_s4 = false;
         if (r)
                 return r;
-        return amdgpu_asic_reset(adev);
+        if (amdgpu_acpi_should_gpu_reset(adev))
+                return amdgpu_asic_reset(adev);
+        return 0;
 }
 static int amdgpu_pmops_thaw(struct device *dev)
...
@@ -678,6 +678,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
                 ptr = &ring->fence_drv.fences[i];
                 old = rcu_dereference_protected(*ptr, 1);
                 if (old && old->ops == &amdgpu_job_fence_ops) {
+                        struct amdgpu_job *job;
+                        /* For non-scheduler bad job, i.e. failed ib test, we need to signal
+                         * it right here or we won't be able to track them in fence_drv
+                         * and they will remain unsignaled during sa_bo free.
+                         */
+                        job = container_of(old, struct amdgpu_job, hw_fence);
+                        if (!job->base.s_fence && !dma_fence_is_signaled(old))
+                                dma_fence_signal(old);
                         RCU_INIT_POINTER(*ptr, NULL);
                         dma_fence_put(old);
                 }
...
@@ -1287,6 +1287,11 @@ static int gfx_v11_0_sw_init(void *handle)
                 break;
         }
+        /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
+        if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
+            amdgpu_sriov_is_pp_one_vf(adev))
+                adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
         /* EOP Event */
         r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
                               GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
@@ -4655,6 +4660,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
         return false;
 }
+static int gfx_v11_0_post_soft_reset(void *handle)
+{
+        /**
+         * GFX soft reset will impact MES, need resume MES when do GFX soft reset
+         */
+        return amdgpu_mes_resume((struct amdgpu_device *)handle);
+}
 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
         uint64_t clock;
@@ -6166,6 +6179,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
         .wait_for_idle = gfx_v11_0_wait_for_idle,
         .soft_reset = gfx_v11_0_soft_reset,
         .check_soft_reset = gfx_v11_0_check_soft_reset,
+        .post_soft_reset = gfx_v11_0_post_soft_reset,
         .set_clockgating_state = gfx_v11_0_set_clockgating_state,
         .set_powergating_state = gfx_v11_0_set_powergating_state,
         .get_clockgating_state = gfx_v11_0_get_clockgating_state,
...
@@ -578,7 +578,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
 static void nv_program_aspm(struct amdgpu_device *adev)
 {
-        if (!amdgpu_device_should_use_aspm(adev))
+        if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
                 return;
 if (!(adev->flags & AMD_IS_APU) &&
...
@@ -81,10 +81,6 @@
 #include "mxgpu_vi.h"
 #include "amdgpu_dm.h"
-#if IS_ENABLED(CONFIG_X86)
-#include <asm/intel-family.h>
-#endif
 #define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
@@ -1138,24 +1134,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
         WREG32_PCIE(ixPCIE_LC_CNTL, data);
 }
-static bool aspm_support_quirk_check(void)
-{
-#if IS_ENABLED(CONFIG_X86)
-        struct cpuinfo_x86 *c = &cpu_data(0);
-        return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
-#else
-        return true;
-#endif
-}
 static void vi_program_aspm(struct amdgpu_device *adev)
 {
         u32 data, data1, orig;
         bool bL1SS = false;
         bool bClkReqSupport = true;
-        if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
+        if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
                 return;
 if (adev->flags & AMD_IS_APU ||
...
@@ -7244,7 +7244,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
         if (!aconnector->mst_root)
                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
-        /* This defaults to the max in the range, but we want 8bpc for non-edp. */
         aconnector->base.state->max_bpc = 16;
         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
...
@@ -271,8 +271,7 @@ static void dccg32_set_dpstreamclk(
         dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
         /* enabled to select one of the DTBCLKs for pipe */
-        switch (otg_inst)
-        {
+        switch (dp_hpo_inst) {
         case 0:
                 REG_UPDATE_2(DPSTREAMCLK_CNTL,
                              DPSTREAMCLK0_EN,
...
@@ -2186,6 +2186,7 @@ static bool dcn32_resource_construct(
         dc->caps.edp_dsc_support = true;
         dc->caps.extended_aux_timeout_support = true;
         dc->caps.dmcub_support = true;
+        dc->caps.seamless_odm = true;
         /* Color pipeline capabilities */
         dc->caps.color.dpp.dcn_arch = 1;
...