Commit 37154c19 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'drm-fixes-2023-03-24' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Daniel Vetter:

 - usual pile of fixes for amdgpu & i915

 - probe error handling fixes for meson, lt8912b bridge

 - the host1x patch from Arnd

 - panel-orientation fix for Lenovo Book X90F

* tag 'drm-fixes-2023-03-24' of git://anongit.freedesktop.org/drm/drm: (23 commits)
  gpu: host1x: fix uninitialized variable use
  drm/amd/display: Set dcn32 caps.seamless_odm
  drm/amd/display: fix wrong index used in dccg32_set_dpstreamclk
  drm/amdgpu/nv: Apply ASPM quirk on Intel ADL + AMD Navi
  drm/amd/display: remove outdated 8bpc comments
  drm/amdgpu/gfx: set cg flags to enter/exit safe mode
  drm/amdgpu: Force signal hw_fences that are embedded in non-sched jobs
  drm/amdgpu: add mes resume when do gfx post soft reset
  drm/amdgpu: skip ASIC reset for APUs when go to S4
  drm/amdgpu: reposition the gpu reset checking for reuse
  drm/bridge: lt8912b: return EPROBE_DEFER if bridge is not found
  drm/meson: fix missing component unbind on bind errors
  drm: panel-orientation-quirks: Add quirk for Lenovo Yoga Book X90F
  Revert "drm/i915/hwmon: Enable PL1 power limit"
  drm/i915: Update vblank timestamping stuff on seamless M/N change
  drm/i915: Fix format for perf_limit_reasons
  drm/i915/gt: perform uc late init after probe error injection
  drm/i915/active: Fix missing debug object activation
  drm/i915/guc: Fix missing ecodes
  drm/i915/mtl: Disable MC6 for MTL A step
  ...
parents 5ad4fe96 08570b7c
...@@ -1272,6 +1272,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); ...@@ -1272,6 +1272,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev); int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev); bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes); u64 num_vis_bytes);
...@@ -1391,10 +1392,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta ...@@ -1391,10 +1392,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void); void amdgpu_acpi_detect(void);
#else #else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { } static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
...@@ -1405,11 +1408,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, ...@@ -1405,11 +1408,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else #else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif #endif
......
...@@ -971,6 +971,29 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev) ...@@ -971,6 +971,29 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
return true; return true;
} }
/**
 * amdgpu_acpi_should_gpu_reset
 *
 * @adev: amdgpu_device_pointer
 *
 * Decide whether the GPU needs an ASIC reset on hibernation entry.
 * APUs and SR-IOV virtual functions never reset here; with suspend
 * support compiled in, suspend-to-idle also skips the reset.
 *
 * Returns: true if the GPU should be reset, false if not.
 */
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
	/* APUs and virtual functions never take the reset path. */
	if ((adev->flags & AMD_IS_APU) || amdgpu_sriov_vf(adev))
		return false;

#if IS_ENABLED(CONFIG_SUSPEND)
	/* s2idle keeps the device powered, so skip the reset for it. */
	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
#else
	return true;
#endif
}
/* /*
* amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
* *
...@@ -1042,24 +1065,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) ...@@ -1042,24 +1065,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
(pm_suspend_target_state == PM_SUSPEND_MEM); (pm_suspend_target_state == PM_SUSPEND_MEM);
} }
/**
 * amdgpu_acpi_should_gpu_reset
 *
 * @adev: amdgpu_device_pointer
 *
 * returns true if should reset GPU, false if not
 */
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
	/* APUs are never reset on this path. */
	if (adev->flags & AMD_IS_APU)
		return false;
	/* SR-IOV virtual functions must not reset the shared ASIC. */
	if (amdgpu_sriov_vf(adev))
		return false;
	/* Suspend-to-idle keeps the device powered; only reset for deeper states. */
	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
}
/** /**
* amdgpu_acpi_is_s0ix_active * amdgpu_acpi_is_s0ix_active
* *
......
...@@ -80,6 +80,10 @@ ...@@ -80,6 +80,10 @@
#include <drm/drm_drv.h> #include <drm/drm_drv.h>
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
...@@ -1356,6 +1360,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev) ...@@ -1356,6 +1360,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
return pcie_aspm_enabled(adev->pdev); return pcie_aspm_enabled(adev->pdev);
} }
/*
 * amdgpu_device_aspm_support_quirk - check for host CPUs where ASPM
 * is known to be problematic (per the pull log: Intel Alder Lake +
 * AMD Navi).
 *
 * Returns: false when the quirk applies and ASPM must stay disabled,
 * true otherwise (including all non-x86 hosts).
 */
bool amdgpu_device_aspm_support_quirk(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Family 6 / Alder Lake model: quirk applies, report no support. */
	if (c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE)
		return false;

	return true;
#else
	/* No known quirk on non-x86 platforms. */
	return true;
#endif
}
/* if we get transitioned to only one device, take VGA back */ /* if we get transitioned to only one device, take VGA back */
/** /**
* amdgpu_device_vga_set_decode - enable/disable vga decode * amdgpu_device_vga_set_decode - enable/disable vga decode
......
...@@ -2467,7 +2467,10 @@ static int amdgpu_pmops_freeze(struct device *dev) ...@@ -2467,7 +2467,10 @@ static int amdgpu_pmops_freeze(struct device *dev)
adev->in_s4 = false; adev->in_s4 = false;
if (r) if (r)
return r; return r;
return amdgpu_asic_reset(adev);
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
return 0;
} }
static int amdgpu_pmops_thaw(struct device *dev) static int amdgpu_pmops_thaw(struct device *dev)
......
...@@ -678,6 +678,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) ...@@ -678,6 +678,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
ptr = &ring->fence_drv.fences[i]; ptr = &ring->fence_drv.fences[i];
old = rcu_dereference_protected(*ptr, 1); old = rcu_dereference_protected(*ptr, 1);
if (old && old->ops == &amdgpu_job_fence_ops) { if (old && old->ops == &amdgpu_job_fence_ops) {
struct amdgpu_job *job;
/* For non-scheduler bad job, i.e. failed ib test, we need to signal
* it right here or we won't be able to track them in fence_drv
* and they will remain unsignaled during sa_bo free.
*/
job = container_of(old, struct amdgpu_job, hw_fence);
if (!job->base.s_fence && !dma_fence_is_signaled(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL); RCU_INIT_POINTER(*ptr, NULL);
dma_fence_put(old); dma_fence_put(old);
} }
......
...@@ -1287,6 +1287,11 @@ static int gfx_v11_0_sw_init(void *handle) ...@@ -1287,6 +1287,11 @@ static int gfx_v11_0_sw_init(void *handle)
break; break;
} }
/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
amdgpu_sriov_is_pp_one_vf(adev))
adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
/* EOP Event */ /* EOP Event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP, r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
GFX_11_0_0__SRCID__CP_EOP_INTERRUPT, GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
...@@ -4655,6 +4660,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle) ...@@ -4655,6 +4660,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
return false; return false;
} }
static int gfx_v11_0_post_soft_reset(void *handle)
{
	/*
	 * GFX soft reset will impact MES, need resume MES when do GFX soft reset
	 * (handle is the struct amdgpu_device for this IP block).
	 */
	return amdgpu_mes_resume((struct amdgpu_device *)handle);
}
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev) static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{ {
uint64_t clock; uint64_t clock;
...@@ -6166,6 +6179,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = { ...@@ -6166,6 +6179,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
.wait_for_idle = gfx_v11_0_wait_for_idle, .wait_for_idle = gfx_v11_0_wait_for_idle,
.soft_reset = gfx_v11_0_soft_reset, .soft_reset = gfx_v11_0_soft_reset,
.check_soft_reset = gfx_v11_0_check_soft_reset, .check_soft_reset = gfx_v11_0_check_soft_reset,
.post_soft_reset = gfx_v11_0_post_soft_reset,
.set_clockgating_state = gfx_v11_0_set_clockgating_state, .set_clockgating_state = gfx_v11_0_set_clockgating_state,
.set_powergating_state = gfx_v11_0_set_powergating_state, .set_powergating_state = gfx_v11_0_set_powergating_state,
.get_clockgating_state = gfx_v11_0_get_clockgating_state, .get_clockgating_state = gfx_v11_0_get_clockgating_state,
......
...@@ -578,7 +578,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev) ...@@ -578,7 +578,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
static void nv_program_aspm(struct amdgpu_device *adev) static void nv_program_aspm(struct amdgpu_device *adev)
{ {
if (!amdgpu_device_should_use_aspm(adev)) if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return; return;
if (!(adev->flags & AMD_IS_APU) && if (!(adev->flags & AMD_IS_APU) &&
......
...@@ -81,10 +81,6 @@ ...@@ -81,10 +81,6 @@
#include "mxgpu_vi.h" #include "mxgpu_vi.h"
#include "amdgpu_dm.h" #include "amdgpu_dm.h"
#if IS_ENABLED(CONFIG_X86)
#include <asm/intel-family.h>
#endif
#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6 #define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
...@@ -1138,24 +1134,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev) ...@@ -1138,24 +1134,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
WREG32_PCIE(ixPCIE_LC_CNTL, data); WREG32_PCIE(ixPCIE_LC_CNTL, data);
} }
/*
 * aspm_support_quirk_check - report whether ASPM may be used on this
 * host CPU; Intel family 6 Alder Lake parts are quirked off.
 */
static bool aspm_support_quirk_check(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
#else
	/* No known quirk on non-x86 hosts. */
	return true;
#endif
}
static void vi_program_aspm(struct amdgpu_device *adev) static void vi_program_aspm(struct amdgpu_device *adev)
{ {
u32 data, data1, orig; u32 data, data1, orig;
bool bL1SS = false; bool bL1SS = false;
bool bClkReqSupport = true; bool bClkReqSupport = true;
if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check()) if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return; return;
if (adev->flags & AMD_IS_APU || if (adev->flags & AMD_IS_APU ||
......
...@@ -7244,7 +7244,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, ...@@ -7244,7 +7244,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (!aconnector->mst_root) if (!aconnector->mst_root)
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
aconnector->base.state->max_bpc = 16; aconnector->base.state->max_bpc = 16;
aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
......
...@@ -271,8 +271,7 @@ static void dccg32_set_dpstreamclk( ...@@ -271,8 +271,7 @@ static void dccg32_set_dpstreamclk(
dccg32_set_dtbclk_p_src(dccg, src, otg_inst); dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
/* enabled to select one of the DTBCLKs for pipe */ /* enabled to select one of the DTBCLKs for pipe */
switch (otg_inst) switch (dp_hpo_inst) {
{
case 0: case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL, REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN, DPSTREAMCLK0_EN,
......
...@@ -2186,6 +2186,7 @@ static bool dcn32_resource_construct( ...@@ -2186,6 +2186,7 @@ static bool dcn32_resource_construct(
dc->caps.edp_dsc_support = true; dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true; dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true; dc->caps.dmcub_support = true;
dc->caps.seamless_odm = true;
/* Color pipeline capabilities */ /* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1; dc->caps.color.dpp.dcn_arch = 1;
......
...@@ -676,8 +676,8 @@ static int lt8912_parse_dt(struct lt8912 *lt) ...@@ -676,8 +676,8 @@ static int lt8912_parse_dt(struct lt8912 *lt)
lt->hdmi_port = of_drm_find_bridge(port_node); lt->hdmi_port = of_drm_find_bridge(port_node);
if (!lt->hdmi_port) { if (!lt->hdmi_port) {
dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__); ret = -EPROBE_DEFER;
ret = -ENODEV; dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__);
goto err_free_host_node; goto err_free_host_node;
} }
......
...@@ -328,10 +328,17 @@ static const struct dmi_system_id orientation_data[] = { ...@@ -328,10 +328,17 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"), DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
}, },
.driver_data = (void *)&lcd1200x1920_rightside_up, .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X90F / X91F / X91L */ }, { /* Lenovo Yoga Book X90F / X90L */
.matches = { .matches = {
/* Non exact match to match all versions */ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X91F / X91L */
.matches = {
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
}, },
.driver_data = (void *)&lcd1200x1920_rightside_up, .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Tablet 2 830F / 830L */ }, { /* Lenovo Yoga Tablet 2 830F / 830L */
......
...@@ -683,6 +683,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) ...@@ -683,6 +683,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
*/ */
intel_vrr_send_push(new_crtc_state); intel_vrr_send_push(new_crtc_state);
/*
* Seamless M/N update may need to update frame timings.
*
* FIXME Should be synchronized with the start of vblank somehow...
*/
if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
intel_crtc_update_active_timings(new_crtc_state);
local_irq_enable(); local_irq_enable();
if (intel_vgpu_active(dev_priv)) if (intel_vgpu_active(dev_priv))
......
...@@ -5145,6 +5145,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, ...@@ -5145,6 +5145,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
* only fields that are know to not cause problems are preserved. */ * only fields that are know to not cause problems are preserved. */
saved_state->uapi = crtc_state->uapi; saved_state->uapi = crtc_state->uapi;
saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state; saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll; saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state; saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
......
...@@ -384,15 +384,12 @@ static void disable_all_event_handlers(struct drm_i915_private *i915) ...@@ -384,15 +384,12 @@ static void disable_all_event_handlers(struct drm_i915_private *i915)
} }
} }
static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{ {
enum pipe pipe; enum pipe pipe;
if (DISPLAY_VER(i915) < 13)
return;
/* /*
* Wa_16015201720:adl-p,dg2, mtl * Wa_16015201720:adl-p,dg2
* The WA requires clock gating to be disabled all the time * The WA requires clock gating to be disabled all the time
* for pipe A and B. * for pipe A and B.
* For pipe C and D clock gating needs to be disabled only * For pipe C and D clock gating needs to be disabled only
...@@ -408,6 +405,25 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) ...@@ -408,6 +405,25 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
PIPEDMC_GATING_DIS, 0); PIPEDMC_GATING_DIS, 0);
} }
/* MTL flavour of the PIPEDMC clock gating workaround (disable only). */
static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
{
	/*
	 * Wa_16015201720
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 */
	intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
		     MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
}
/*
 * Dispatch the PIPEDMC clock gating WA to the right per-platform
 * implementation based on display version.
 */
static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	/* Display 14+ (MTL) only has a "disable gating" flavour of the WA. */
	if (DISPLAY_VER(i915) >= 14) {
		if (enable)
			mtl_pipedmc_clock_gating_wa(i915);
		return;
	}

	/* Display 13 (ADL-P/DG2) toggles the WA in both directions. */
	if (DISPLAY_VER(i915) == 13)
		adlp_pipedmc_clock_gating_wa(i915, enable);
}
void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{ {
if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
......
...@@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
bool prealloc = false; bool prealloc = false;
void __iomem *vaddr; void __iomem *vaddr;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_gem_ww_ctx ww;
int ret; int ret;
mutex_lock(&ifbdev->hpd_lock); mutex_lock(&ifbdev->hpd_lock);
...@@ -283,13 +284,24 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -283,13 +284,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fix.smem_len = vma->size; info->fix.smem_len = vma->size;
} }
vaddr = i915_vma_pin_iomap(vma); for_i915_gem_ww(&ww, ret, false) {
if (IS_ERR(vaddr)) { ret = i915_gem_object_lock(vma->obj, &ww);
drm_err(&dev_priv->drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); if (ret)
ret = PTR_ERR(vaddr); continue;
goto out_unpin;
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
drm_err(&dev_priv->drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
}
} }
if (ret)
goto out_unpin;
info->screen_base = vaddr; info->screen_base = vaddr;
info->screen_size = vma->size; info->screen_size = vma->size;
......
...@@ -737,12 +737,12 @@ int intel_gt_init(struct intel_gt *gt) ...@@ -737,12 +737,12 @@ int intel_gt_init(struct intel_gt *gt)
if (err) if (err)
goto err_gt; goto err_gt;
intel_uc_init_late(&gt->uc);
err = i915_inject_probe_error(gt->i915, -EIO); err = i915_inject_probe_error(gt->i915, -EIO);
if (err) if (err)
goto err_gt; goto err_gt;
intel_uc_init_late(&gt->uc);
intel_migrate_init(&gt->migrate, gt); intel_migrate_init(&gt->migrate, gt);
goto out_fw; goto out_fw;
......
...@@ -21,31 +21,10 @@ ...@@ -21,31 +21,10 @@
#include "intel_rc6.h" #include "intel_rc6.h"
#include "intel_rps.h" #include "intel_rps.h"
#include "intel_wakeref.h" #include "intel_wakeref.h"
#include "intel_pcode.h"
#include "pxp/intel_pxp_pm.h" #include "pxp/intel_pxp_pm.h"
#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2) #define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
/* Tell pcode the media GT is busy on affected MTL steppings (Wa_14017073508). */
static void mtl_media_busy(struct intel_gt *gt)
{
	/* Wa_14017073508: mtl */
	if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
	    gt->type == GT_MEDIA)
		snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
				  PCODE_MBOX_GT_STATE_MEDIA_BUSY,
				  PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
}
/* Tell pcode the media GT is idle on affected MTL steppings (Wa_14017073508). */
static void mtl_media_idle(struct intel_gt *gt)
{
	/* Wa_14017073508: mtl */
	if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
	    gt->type == GT_MEDIA)
		snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
				  PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY,
				  PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
}
static void user_forcewake(struct intel_gt *gt, bool suspend) static void user_forcewake(struct intel_gt *gt, bool suspend)
{ {
int count = atomic_read(&gt->user_wakeref); int count = atomic_read(&gt->user_wakeref);
...@@ -93,9 +72,6 @@ static int __gt_unpark(struct intel_wakeref *wf) ...@@ -93,9 +72,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
GT_TRACE(gt, "\n"); GT_TRACE(gt, "\n");
/* Wa_14017073508: mtl */
mtl_media_busy(gt);
/* /*
* It seems that the DMC likes to transition between the DC states a lot * It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during * when there are no connected displays (no active power domains) during
...@@ -145,9 +121,6 @@ static int __gt_park(struct intel_wakeref *wf) ...@@ -145,9 +121,6 @@ static int __gt_park(struct intel_wakeref *wf)
GEM_BUG_ON(!wakeref); GEM_BUG_ON(!wakeref);
intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref); intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
/* Wa_14017073508: mtl */
mtl_media_idle(gt);
return 0; return 0;
} }
......
...@@ -580,7 +580,7 @@ static bool perf_limit_reasons_eval(void *data) ...@@ -580,7 +580,7 @@ static bool perf_limit_reasons_eval(void *data)
} }
DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get, DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
perf_limit_reasons_clear, "%llu\n"); perf_limit_reasons_clear, "0x%llx\n");
void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root) void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{ {
......
...@@ -486,6 +486,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) ...@@ -486,6 +486,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
static bool rc6_supported(struct intel_rc6 *rc6) static bool rc6_supported(struct intel_rc6 *rc6)
{ {
struct drm_i915_private *i915 = rc6_to_i915(rc6); struct drm_i915_private *i915 = rc6_to_i915(rc6);
struct intel_gt *gt = rc6_to_gt(rc6);
if (!HAS_RC6(i915)) if (!HAS_RC6(i915))
return false; return false;
...@@ -502,6 +503,13 @@ static bool rc6_supported(struct intel_rc6 *rc6) ...@@ -502,6 +503,13 @@ static bool rc6_supported(struct intel_rc6 *rc6)
return false; return false;
} }
if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
gt->type == GT_MEDIA) {
drm_notice(&i915->drm,
"Media RC6 disabled on A step\n");
return false;
}
return true; return true;
} }
......
...@@ -1571,6 +1571,27 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf, ...@@ -1571,6 +1571,27 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
#endif //CONFIG_DRM_I915_CAPTURE_ERROR #endif //CONFIG_DRM_I915_CAPTURE_ERROR
/*
 * Scan the engine-instance register list of a GuC capture node and
 * copy the IPEHR and INSTDONE values into the coredump, so the error
 *-state ecode can be derived from GuC-captured registers.
 */
static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
{
	struct gcap_reg_list_info *reginfo;
	struct guc_mmio_reg *reg;
	i915_reg_t ipehr = RING_IPEHR(0);
	i915_reg_t instdone = RING_INSTDONE(0);
	int idx;

	/* Nothing to scan without a captured node. */
	if (!ee->guc_capture_node)
		return;

	reginfo = &ee->guc_capture_node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE];
	for (idx = 0; idx < reginfo->num_regs; idx++) {
		reg = &reginfo->regs[idx];
		if (reg->offset == ipehr.reg)
			ee->ipehr = reg->value;
		else if (reg->offset == instdone.reg)
			ee->instdone.instdone = reg->value;
	}
}
void intel_guc_capture_free_node(struct intel_engine_coredump *ee) void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
{ {
if (!ee || !ee->guc_capture_node) if (!ee || !ee->guc_capture_node)
...@@ -1612,6 +1633,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt, ...@@ -1612,6 +1633,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
list_del(&n->link); list_del(&n->link);
ee->guc_capture_node = n; ee->guc_capture_node = n;
ee->guc_capture = guc->capture; ee->guc_capture = guc->capture;
guc_capture_find_ecode(ee);
return; return;
} }
} }
......
...@@ -11,20 +11,9 @@ ...@@ -11,20 +11,9 @@
static bool __guc_rc_supported(struct intel_guc *guc) static bool __guc_rc_supported(struct intel_guc *guc)
{ {
struct intel_gt *gt = guc_to_gt(guc);
/*
* Wa_14017073508: mtl
* Do not enable gucrc to avoid additional interrupts which
* may disrupt pcode wa.
*/
if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
gt->type == GT_MEDIA)
return false;
/* GuC RC is unavailable for pre-Gen12 */ /* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported && return guc->submission_supported &&
GRAPHICS_VER(gt->i915) >= 12; GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
} }
static bool __guc_rc_selected(struct intel_guc *guc) static bool __guc_rc_selected(struct intel_guc *guc)
......
...@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref) ...@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref) static void debug_active_activate(struct i915_active *ref)
{ {
lockdep_assert_held(&ref->tree_lock); lockdep_assert_held(&ref->tree_lock);
if (!atomic_read(&ref->count)) /* before the first inc */ debug_object_activate(ref, &active_debug_desc);
debug_object_activate(ref, &active_debug_desc);
} }
static void debug_active_deactivate(struct i915_active *ref) static void debug_active_deactivate(struct i915_active *ref)
......
...@@ -687,11 +687,6 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) ...@@ -687,11 +687,6 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
for_each_gt(gt, i915, i) for_each_gt(gt, i915, i)
hwm_energy(&hwmon->ddat_gt[i], &energy); hwm_energy(&hwmon->ddat_gt[i], &energy);
} }
/* Enable PL1 power limit */
if (i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit))
hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit,
PKG_PWR_LIM_1_EN, PKG_PWR_LIM_1_EN);
} }
void i915_hwmon_register(struct drm_i915_private *i915) void i915_hwmon_register(struct drm_i915_private *i915)
......
...@@ -1786,9 +1786,11 @@ ...@@ -1786,9 +1786,11 @@
* GEN9 clock gating regs * GEN9 clock gating regs
*/ */
#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530) #define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
#define DARBF_GATING_DIS (1 << 27) #define DARBF_GATING_DIS REG_BIT(27)
#define PWM2_GATING_DIS (1 << 14) #define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15)
#define PWM1_GATING_DIS (1 << 13) #define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14)
#define PWM2_GATING_DIS REG_BIT(14)
#define PWM1_GATING_DIS REG_BIT(13)
#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538) #define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
#define TGL_VRH_GATING_DIS REG_BIT(31) #define TGL_VRH_GATING_DIS REG_BIT(31)
...@@ -6596,15 +6598,6 @@ ...@@ -6596,15 +6598,6 @@
/* XEHP_PCODE_FREQUENCY_CONFIG param2 */ /* XEHP_PCODE_FREQUENCY_CONFIG param2 */
#define PCODE_MBOX_DOMAIN_NONE 0x0 #define PCODE_MBOX_DOMAIN_NONE 0x0
#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3 #define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
/* Wa_14017210380: mtl */
#define PCODE_MBOX_GT_STATE 0x50
/* sub-commands (param1) */
#define PCODE_MBOX_GT_STATE_MEDIA_BUSY 0x1
#define PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY 0x2
/* param2 */
#define PCODE_MBOX_GT_STATE_DOMAIN_MEDIA 0x1
#define GEN6_PCODE_DATA _MMIO(0x138128) #define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
......
...@@ -325,23 +325,23 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) ...@@ -325,23 +325,23 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
ret = meson_encoder_hdmi_init(priv); ret = meson_encoder_hdmi_init(priv);
if (ret) if (ret)
goto exit_afbcd; goto unbind_all;
ret = meson_plane_create(priv); ret = meson_plane_create(priv);
if (ret) if (ret)
goto exit_afbcd; goto unbind_all;
ret = meson_overlay_create(priv); ret = meson_overlay_create(priv);
if (ret) if (ret)
goto exit_afbcd; goto unbind_all;
ret = meson_crtc_create(priv); ret = meson_crtc_create(priv);
if (ret) if (ret)
goto exit_afbcd; goto unbind_all;
ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm); ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret) if (ret)
goto exit_afbcd; goto unbind_all;
drm_mode_config_reset(drm); drm_mode_config_reset(drm);
...@@ -359,6 +359,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) ...@@ -359,6 +359,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
uninstall_irq: uninstall_irq:
free_irq(priv->vsync_irq, drm); free_irq(priv->vsync_irq, drm);
unbind_all:
if (has_components)
component_unbind_all(drm->dev, drm);
exit_afbcd: exit_afbcd:
if (priv->afbcd.ops) if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv); priv->afbcd.ops->exit(priv);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment