Commit b01f596a authored by Dave Airlie

Merge tag 'drm-intel-fixes-2024-03-28' of https://anongit.freedesktop.org/git/drm/drm-intel into drm-fixes

Core/GT Fixes:
- Fix for BUG_ON/BUILD_BUG_ON in i915_memcpy.c (Joonas)
- Update a MTL workaround (Tejas)
- Fix locking inversion in hwmon's sysfs (Janusz)
- Remove a bogus error message around PXP (Jose)
- Fix UAF on VMA (Janusz)
- Reset queue_priority_hint on parking (Chris)

Display Fixes:
- Remove duplicated audio enable/disable on SDVO and DP (Ville)
- Disable AuxCCS for Xe driver (Juha-Pekka)
- Revert init order of MIPI DSI (Ville)
- DRRS debugfs fix with an extra refactor patch (Bhanuprakash)
- VRR related fixes (Ville)
- Fix a JSL eDP corruption (Jonathon)
- Fix the cursor physical dma address (Ville)
- BIOS VBT related fix (Ville)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZgYaIVgjIs30mIvS@intel.com
parents 2f73503e 32e39bab
......@@ -717,7 +717,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_atomic_state *state,
......@@ -726,7 +725,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_edp_backlight_on(pipe_config, conn_state);
encoder->audio_enable(encoder, pipe_config, conn_state);
}
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
......
......@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
......@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
......
......@@ -1955,16 +1955,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
static void fixup_mipi_sequences(struct drm_i915_private *i915,
struct intel_panel *panel)
static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
struct intel_panel *panel)
{
u8 *init_otp;
int len;
/* Limit this to VLV for now. */
if (!IS_VALLEYVIEW(i915))
return;
/* Limit this to v1 vid-mode sequences */
if (panel->vbt.dsi.config->is_cmd_mode ||
panel->vbt.dsi.seq_version != 1)
......@@ -2000,6 +1996,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
/*
* Some machines (eg. Lenovo 82TQ) appear to have broken
* VBT sequences:
* - INIT_OTP is not present at all
* - what should be in INIT_OTP is in DISPLAY_ON
* - what should be in DISPLAY_ON is in BACKLIGHT_ON
* (along with the actual backlight stuff)
*
* To make those work we simply swap DISPLAY_ON and INIT_OTP.
*
* TODO: Do we need to limit this to specific machines,
* or examine the contents of the sequences to
* avoid false positives?
*/
static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
struct intel_panel *panel)
{
if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
}
}
static void fixup_mipi_sequences(struct drm_i915_private *i915,
struct intel_panel *panel)
{
if (DISPLAY_VER(i915) >= 11)
icl_fixup_mipi_sequences(i915, panel);
else if (IS_VALLEYVIEW(i915))
vlv_fixup_mipi_sequences(i915, panel);
}
static void
parse_mipi_sequence(struct drm_i915_private *i915,
struct intel_panel *panel)
......@@ -3351,6 +3382,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
{
const struct child_device_config *child = &devdata->child;
if (!devdata)
return false;
if (!intel_bios_encoder_supports_dp(devdata) ||
!intel_bios_encoder_supports_hdmi(devdata))
return false;
......
......@@ -36,12 +36,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
base = i915_gem_object_get_dma_address(obj, 0);
base = plane_state->phys_dma_addr;
else
base = intel_plane_ggtt_offset(plane_state);
......
......@@ -727,6 +727,7 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
u32 phys_dma_addr; /* for cursor_needs_physical */
/* Plane pxp decryption state */
bool decrypt;
......
......@@ -67,6 +67,7 @@
#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
......@@ -2683,15 +2684,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder)
{
if (HAS_DOUBLE_BUFFERED_M_N(i915))
return true;
return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
}
static bool can_enable_drrs(struct intel_connector *connector,
const struct intel_crtc_state *pipe_config,
const struct drm_display_mode *downclock_mode)
......@@ -2714,7 +2706,7 @@ static bool can_enable_drrs(struct intel_connector *connector,
if (pipe_config->has_pch_encoder)
return false;
if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
return false;
return downclock_mode &&
......
......@@ -2554,7 +2554,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
return ((IS_ELKHARTLAKE(i915) &&
IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->display.dpll.ref_clks.nssc == 38400;
......
......@@ -63,6 +63,15 @@ const char *intel_drrs_type_str(enum drrs_type drrs_type)
return str[drrs_type];
}
bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder)
{
if (HAS_DOUBLE_BUFFERED_M_N(i915))
return true;
return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
}
static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
enum drrs_refresh_rate refresh_rate)
......@@ -312,9 +321,8 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
mutex_lock(&crtc->drrs.mutex);
seq_printf(m, "DRRS capable: %s\n",
str_yes_no(crtc_state->has_drrs ||
HAS_DOUBLE_BUFFERED_M_N(i915) ||
intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
str_yes_no(intel_cpu_transcoder_has_drrs(i915,
crtc_state->cpu_transcoder)));
seq_printf(m, "DRRS enabled: %s\n",
str_yes_no(crtc_state->has_drrs));
......
......@@ -9,12 +9,15 @@
#include <linux/types.h>
enum drrs_type;
enum transcoder;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_connector;
bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder);
const char *intel_drrs_type_str(enum drrs_type drrs_type);
bool intel_drrs_is_active(struct intel_crtc *crtc);
void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
......
......@@ -340,6 +340,17 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
}
static u32 dsb_chicken(struct intel_crtc *crtc)
{
if (crtc->mode_flags & I915_MODE_FLAG_VRR)
return DSB_CTRL_WAIT_SAFE_WINDOW |
DSB_CTRL_NO_WAIT_VBLANK |
DSB_INST_WAIT_SAFE_WINDOW |
DSB_INST_NO_WAIT_VBLANK;
else
return 0;
}
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
int dewake_scanline)
{
......@@ -361,6 +372,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
ctrl | DSB_ENABLE);
intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id),
dsb_chicken(crtc));
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
......
......@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return PTR_ERR(vma);
plane_state->ggtt_vma = vma;
/*
* Pre-populate the dma address before we enter the vblank
* evade critical section as i915_gem_object_get_dma_address()
* will trigger might_sleep() even if it won't actually sleep,
* which is the case when the fb has already been pinned.
*/
if (phys_cursor)
plane_state->phys_dma_addr =
i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
} else {
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
......
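The cursor fix above has two halves: intel_plane_pin_fb() resolves the physical DMA address up front, while sleeping is still allowed, and intel_cursor_base() later only reads the cached plane_state->phys_dma_addr inside the vblank-evade critical section. A minimal user-space sketch of that "compute early, consume in the atomic section" shape, using hypothetical names (prepare_plane(), commit_plane()) rather than the i915 API:

/* Illustrative sketch only -- hypothetical names, not the i915 code. */
#include <stdint.h>
#include <stdio.h>

struct plane_state {
        uint64_t phys_addr;        /* cached while sleeping is still allowed */
};

/* May sleep: pin the framebuffer and resolve its DMA address up front. */
static void prepare_plane(struct plane_state *st)
{
        st->phys_addr = 0x1000;    /* stand-in for i915_gem_object_get_dma_address() */
}

/* Atomic (vblank-evade) section: only consume the cached value. */
static void commit_plane(const struct plane_state *st)
{
        printf("cursor base = 0x%llx\n", (unsigned long long)st->phys_addr);
}

int main(void)
{
        struct plane_state st;

        prepare_plane(&st);        /* outside the critical section */
        commit_plane(&st);         /* inside the critical section */
        return 0;
}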
......@@ -1842,8 +1842,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
encoder->audio_disable(encoder, old_crtc_state, conn_state);
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
......@@ -1935,8 +1933,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
encoder->audio_enable(encoder, pipe_config, conn_state);
}
static enum drm_mode_status
......
......@@ -187,10 +187,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
* TRANS_SET_CONTEXT_LATENCY with VRR enabled
* requires this chicken bit on ADL/DG2.
* This bit seems to have two meanings depending on the platform:
* TGL: generate VRR "safe window" for DSB vblank waits
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
if (DISPLAY_VER(dev_priv) == 13)
if (IS_DISPLAY_VER(dev_priv, 12, 13))
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
......
......@@ -2295,6 +2295,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (HAS_4TILE(i915))
caps |= INTEL_PLANE_CAP_TILING_4;
if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
return caps;
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
if (DISPLAY_VER(i915) >= 12)
......
......@@ -279,9 +279,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_park_heartbeat(engine);
intel_breadcrumbs_park(engine->breadcrumbs);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
if (engine->park)
engine->park(engine);
......
......@@ -3272,6 +3272,9 @@ static void execlists_park(struct intel_engine_cs *engine)
{
cancel_timer(&engine->execlists.timer);
cancel_timer(&engine->execlists.preempt);
/* Reset upon idling, or we may delay the busy wakeup. */
WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
}
static void add_to_engine(struct i915_request *rq)
......
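Together, the two hunks above move the queue_priority_hint reset from a park-time assertion into the execlists park path itself. The hint is what a later submission is compared against to decide whether the engine needs a kick, so leaving it stale can delay the busy wakeup. A rough, hypothetical sketch of that comparison (simplified, not the i915 scheduler):

/* Illustrative sketch only -- a simplified, hypothetical scheduler. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct sched {
        int queue_priority_hint;   /* highest priority believed queued */
};

/* On parking, drop the hint so the next request always looks "newsworthy". */
static void park(struct sched *s)
{
        s->queue_priority_hint = INT_MIN;
}

/* On submission, only kick the engine if the new request beats the hint. */
static bool submit(struct sched *s, int prio)
{
        bool kick = prio > s->queue_priority_hint;

        if (kick)
                s->queue_priority_hint = prio;
        return kick;
}

int main(void)
{
        struct sched s = { .queue_priority_hint = 0 };

        park(&s);  /* without this, the stale hint (0) would swallow a prio -10 submit */
        printf("kick after park: %d\n", submit(&s, -10));  /* prints 1: wakeup happens */
        return 0;
}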
......@@ -1653,6 +1653,7 @@ static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Wa_14018575942 / Wa_18018781329 */
wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
......
......@@ -800,7 +800,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_cleanup_modeset2;
ret = intel_pxp_init(i915);
if (ret != -ENODEV)
if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
......
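The PXP change above is a one-character condition fix: -ENODEV means PXP is simply not supported and 0 means success, and neither should produce a "pxp init failed" message. A tiny standalone sketch of the corrected check:

/* Illustrative sketch only -- stand-in logging, not the driver's probe path. */
#include <errno.h>
#include <stdio.h>

static void report_pxp_init(int ret)
{
        /* old check was (ret != -ENODEV), which also fired on success (ret == 0) */
        if (ret && ret != -ENODEV)
                printf("pxp init failed with %d\n", ret);
}

int main(void)
{
        report_pxp_init(0);        /* success: silent */
        report_pxp_init(-ENODEV);  /* unsupported: silent */
        report_pxp_init(-EIO);     /* real error: logged */
        return 0;
}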
......@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
struct intel_uncore *uncore = ddat->uncore;
intel_wakeref_t wakeref;
mutex_lock(&hwmon->hwmon_lock);
with_intel_runtime_pm(uncore->rpm, wakeref) {
mutex_lock(&hwmon->hwmon_lock);
with_intel_runtime_pm(uncore->rpm, wakeref)
intel_uncore_rmw(uncore, reg, clear, set);
mutex_unlock(&hwmon->hwmon_lock);
mutex_unlock(&hwmon->hwmon_lock);
}
}
/*
......@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
else
rgaddr = hwmon->rg.energy_status_all;
mutex_lock(&hwmon->hwmon_lock);
with_intel_runtime_pm(uncore->rpm, wakeref) {
mutex_lock(&hwmon->hwmon_lock);
with_intel_runtime_pm(uncore->rpm, wakeref)
reg_val = intel_uncore_read(uncore, rgaddr);
if (reg_val >= ei->reg_val_prev)
ei->accum_energy += reg_val - ei->reg_val_prev;
else
ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
ei->reg_val_prev = reg_val;
if (reg_val >= ei->reg_val_prev)
ei->accum_energy += reg_val - ei->reg_val_prev;
else
ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
ei->reg_val_prev = reg_val;
*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
hwmon->scl_shift_energy);
mutex_unlock(&hwmon->hwmon_lock);
*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
hwmon->scl_shift_energy);
mutex_unlock(&hwmon->hwmon_lock);
}
}
static ssize_t
......@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
/* Block waiting for GuC reset to complete when needed */
for (;;) {
wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
mutex_lock(&hwmon->hwmon_lock);
prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
......@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
}
mutex_unlock(&hwmon->hwmon_lock);
intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
schedule();
}
finish_wait(&ddat->waitq, &wait);
if (ret)
goto unlock;
wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
goto exit;
/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
if (val == PL1_DISABLE) {
......@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
exit:
intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
return ret;
}
......
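All three hwmon hunks above apply the same ordering rule: acquire the runtime-PM wakeref first, then take hwmon_lock, and release in reverse order, so the sysfs paths no longer invert the locking used elsewhere in the driver. A condensed sketch of that ordering, with pthreads and stub functions standing in for the kernel primitives:

/* Illustrative sketch only -- pthreads/stubs stand in for the i915 primitives. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hwmon_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int fake_reg;

static void runtime_pm_get(void) { /* stand-in for intel_runtime_pm_get() */ }
static void runtime_pm_put(void) { /* stand-in for intel_runtime_pm_put() */ }

static void locked_with_pm_rmw(unsigned int clear, unsigned int set)
{
        runtime_pm_get();                   /* wakeref first... */
        pthread_mutex_lock(&hwmon_lock);    /* ...then the hwmon lock */

        fake_reg = (fake_reg & ~clear) | set;

        pthread_mutex_unlock(&hwmon_lock);  /* release in reverse order */
        runtime_pm_put();
}

int main(void)
{
        locked_with_pm_rmw(0xffu, 0x2au);
        printf("reg = 0x%x\n", fake_reg);
        return 0;
}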
......@@ -26,6 +26,7 @@
#include <linux/string.h>
#include <linux/cpufeature.h>
#include <linux/bug.h>
#include <linux/build_bug.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"
......
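The i915_memcpy.c hunk only adds the <linux/build_bug.h> include, so the file's BUILD_BUG_ON()/BUG_ON() users pick up their declarations directly instead of relying on an indirect include. As a reminder of what the macro provides, a user-space approximation built on C11 _Static_assert (not the kernel implementation):

/* Illustrative sketch only -- a user-space stand-in for BUILD_BUG_ON(). */
#define BUILD_BUG_ON(cond) _Static_assert(!(cond), "BUILD_BUG_ON: " #cond)

struct packet {
        unsigned int header;
        unsigned int payload[4];
};

int main(void)
{
        /* Fails at compile time if the layout assumption is ever broken. */
        BUILD_BUG_ON(sizeof(struct packet) != 5 * sizeof(unsigned int));
        return 0;
}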
......@@ -4599,7 +4599,7 @@
#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */
#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
......
......@@ -34,6 +34,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
......@@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
struct i915_vma *vma = active_to_vma(ref);
if (!i915_vma_tryget(vma))
return -ENOENT;
/*
* Exclude global GTT VMA from holding a GT wakeref
* while active, otherwise GPU never goes idle.
*/
if (!i915_vma_is_ggtt(vma)) {
/*
* Since we and our _retire() counterpart can be
* called asynchronously, storing a wakeref tracking
* handle inside struct i915_vma is not safe, and
* there is no other good place for that. Hence,
* use untracked variants of intel_gt_pm_get/put().
*/
intel_gt_pm_get_untracked(vma->vm->gt);
}
return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
struct i915_vma *vma = active_to_vma(ref);
if (!i915_vma_is_ggtt(vma)) {
/*
* Since we can be called from atomic contexts,
* use an async variant of intel_gt_pm_put().
*/
intel_gt_pm_put_async_untracked(vma->vm->gt);
}
i915_vma_put(vma);
}
static struct i915_vma *
......@@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
intel_wakeref_t wakeref = 0;
intel_wakeref_t wakeref;
unsigned int bound;
int err;
......@@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
/*
* In case of a global GTT, we must hold a runtime-pm wakeref
* while global PTEs are updated. In other cases, we hold
* the rpm reference while the VMA is active. Since runtime
* resume may require allocations, which are forbidden inside
* vm->mutex, get the first rpm wakeref outside of the mutex.
*/
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
......@@ -1561,8 +1598,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
......
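The VMA fix above pairs a GT power-management reference with the VMA's active window: __i915_vma_active() takes an untracked GT wakeref for non-GGTT VMAs and __i915_vma_retire() drops it with the async variant because retirement can run in atomic context; GGTT VMAs are excluded so the GPU can still idle. A stripped-down sketch of that pattern, with an atomic counter standing in for the GT wakeref (hypothetical, not the intel_gt_pm API):

/* Illustrative sketch only -- an atomic counter stands in for the GT wakeref. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int gt_wakeref;      /* stand-in for the GT PM reference count */

struct vma {
        bool is_ggtt;
};

static void vma_active(struct vma *vma)
{
        /* Global GTT VMAs must not keep the GT awake, or it never idles. */
        if (!vma->is_ggtt)
                atomic_fetch_add(&gt_wakeref, 1);
}

static void vma_retire(struct vma *vma)
{
        /* Retirement may run in atomic context, so only do non-sleeping work. */
        if (!vma->is_ggtt)
                atomic_fetch_sub(&gt_wakeref, 1);
}

int main(void)
{
        struct vma ppgtt = { .is_ggtt = false };

        vma_active(&ppgtt);
        vma_retire(&ppgtt);
        printf("wakeref count: %d\n", atomic_load(&gt_wakeref));
        return 0;
}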