Commit 38624d2c authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2022-12-15' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Documentation fixes (Matt, Miaoqian)
- OA-perf related fix (Umesh)
- VLV/CHV HDMI/DP audio fix (Ville)
- Display DDI/Transcoder fix (Khaled)
- Migrate fixes (Chris, Matt)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Y5uFYOJ/1jgf2eSE@intel.com
parents 5504eb16 ad0fca2d
@@ -673,8 +673,6 @@ static void intel_enable_dp(struct intel_atomic_state *state,
intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void g4x_enable_dp(struct intel_atomic_state *state,
@@ -683,6 +681,7 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
@@ -691,6 +690,7 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
......
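These DP hunks move intel_audio_codec_enable() out of intel_enable_dp(), which VLV/CHV invoke from their pre-enable path, and into the per-platform .enable() hooks (g4x_enable_dp(), vlv_enable_dp()), so the audio codec is only enabled once the pipe is actually running; the HDMI hunks below make the matching change. A minimal user-space model of the resulting hook ordering (the struct and function names are hypothetical stand-ins, not i915 code):

#include <stdio.h>

struct encoder_hooks {
    void (*pre_enable)(void);
    void (*enable)(void);
};

static void model_pre_enable(void)
{
    puts("pre-enable: port powered up, link trained");
}

static void model_enable(void)
{
    puts("enable: pipe running -> audio codec on, backlight on");
}

int main(void)
{
    /* stand-in for the VLV/CHV DP encoder after this patch */
    struct encoder_hooks vlv_dp = {
        .pre_enable = model_pre_enable, /* vlv_pre_enable_dp() -> intel_enable_dp() */
        .enable     = model_enable,     /* vlv_enable_dp(), where audio now lives */
    };

    vlv_dp.pre_enable(); /* the modeset sequence runs this first */
    vlv_dp.enable();     /* audio is enabled here, after the pipe is up */
    return 0;
}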
@@ -157,10 +157,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
&pipe_config->infoframes.hdmi);
}
static void g4x_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -175,6 +173,16 @@ static void g4x_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
static void g4x_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
g4x_hdmi_enable_port(encoder, pipe_config);
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
!pipe_config->has_hdmi_sink);
@@ -294,6 +302,11 @@ static void vlv_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
!pipe_config->has_hdmi_sink);
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void intel_disable_hdmi(struct intel_atomic_state *state,
@@ -415,7 +428,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state,
pipe_config->has_infoframe,
pipe_config, conn_state);
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
g4x_hdmi_enable_port(encoder, pipe_config);
vlv_wait_port_ready(dev_priv, dig_port, 0x0);
}
@@ -492,7 +505,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state,
pipe_config->has_infoframe,
pipe_config, conn_state);
g4x_enable_hdmi(state, encoder, pipe_config, conn_state);
g4x_hdmi_enable_port(encoder, pipe_config);
vlv_wait_port_ready(dev_priv, dig_port, 0x0);
......
@@ -3679,61 +3679,6 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
}
}
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
trans_ddi_func_ctl_value = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(pipe));
trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
TGL_TRANS_DDI_PORT_MASK);
trans_conf_value &= ~PIPECONF_ENABLE;
dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
trans_ddi_func_ctl_value);
intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = dig_port->base.port;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
trans_ddi_func_ctl_value = intel_de_read(dev_priv,
TRANS_DDI_FUNC_CTL(pipe));
trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
TGL_TRANS_DDI_SELECT_PORT(port);
trans_conf_value |= PIPECONF_ENABLE;
dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
trans_ddi_func_ctl_value);
}
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -3752,14 +3697,10 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
link_status);
intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
intel_dp_phy_pattern_update(intel_dp, crtc_state);
intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
intel_dp->train_set, crtc_state->lane_count);
......
@@ -785,6 +785,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
return false;
if (obj->flags & I915_BO_ALLOC_CCS_AUX)
return true;
for (i = 0; i < obj->mm.n_placements; i++) {
/* Compression is not allowed for the objects with smem placement */
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
......
@@ -327,16 +327,18 @@ struct drm_i915_gem_object {
* dealing with userspace objects the CPU fault handler is free to ignore this.
*/
#define I915_BO_ALLOC_GPU_ONLY BIT(6)
#define I915_BO_ALLOC_CCS_AUX BIT(7)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_CPU_CLEAR | \
I915_BO_ALLOC_USER | \
I915_BO_ALLOC_PM_VOLATILE | \
I915_BO_ALLOC_PM_EARLY | \
I915_BO_ALLOC_GPU_ONLY)
#define I915_BO_READONLY BIT(7)
#define I915_TILING_QUIRK_BIT 8 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(9)
I915_BO_ALLOC_GPU_ONLY | \
I915_BO_ALLOC_CCS_AUX)
#define I915_BO_READONLY BIT(8)
#define I915_TILING_QUIRK_BIT 9 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(10)
/**
* @mem_flags - Mutable placement-related flags
*
......
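Because I915_BO_ALLOC_CCS_AUX takes BIT(7), every non-allocation flag shifts up one bit, which is what the renumbering of I915_BO_READONLY, I915_TILING_QUIRK_BIT and I915_BO_PROTECTED above encodes. A standalone compile-time sketch of that layout (BIT() is a local stand-in for the kernel macro; the assertions merely restate the hunk):

#include <stdint.h>

#define BIT(n) (UINT32_C(1) << (n)) /* local stand-in for the kernel macro */

#define I915_BO_ALLOC_GPU_ONLY BIT(6)
#define I915_BO_ALLOC_CCS_AUX  BIT(7)  /* new in this patch */
#define I915_BO_READONLY       BIT(8)  /* was BIT(7) */
#define I915_TILING_QUIRK_BIT  9       /* was 8 */
#define I915_BO_PROTECTED      BIT(10) /* was BIT(9) */

_Static_assert((I915_BO_ALLOC_CCS_AUX & I915_BO_READONLY) == 0,
               "new alloc flag must not collide with I915_BO_READONLY");
_Static_assert((I915_BO_READONLY & BIT(I915_TILING_QUIRK_BIT)) == 0 &&
               (BIT(I915_TILING_QUIRK_BIT) & I915_BO_PROTECTED) == 0,
               "flag bits must stay distinct");

int main(void)
{
    return 0;
}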
@@ -50,6 +50,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
container_of(bo->bdev, typeof(*i915), bdev);
struct drm_i915_gem_object *backup;
struct ttm_operation_ctx ctx = {};
unsigned int flags;
int err = 0;
if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
@@ -65,7 +66,22 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
return 0;
backup = i915_gem_object_create_shmem(i915, obj->base.size);
/*
* It seems that we might have some framebuffers still pinned at this
* stage, but for such objects we might also need to deal with the CCS
* aux state. Make sure we force the save/restore of the CCS state,
* otherwise we might observe display corruption, when returning from
* suspend.
*/
flags = 0;
if (i915_gem_object_needs_ccs_pages(obj)) {
WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
WARN_ON_ONCE(!pm_apply->allow_gpu);
flags = I915_BO_ALLOC_CCS_AUX;
}
backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
obj->base.size, 0, flags);
if (IS_ERR(backup))
return PTR_ERR(backup);
......
@@ -702,7 +702,7 @@ void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
}
/**
* intel_gt_mcr_wait_for_reg_fw - wait until MCR register matches expected state
* intel_gt_mcr_wait_for_reg - wait until MCR register matches expected state
* @gt: GT structure
* @reg: the register to read
* @mask: mask to apply to register value
......
@@ -342,6 +342,16 @@ static int emit_no_arbitration(struct i915_request *rq)
return 0;
}
static int max_pte_pkt_size(struct i915_request *rq, int pkt)
{
struct intel_ring *ring = rq->ring;
pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
return pkt;
}
static int emit_pte(struct i915_request *rq,
struct sgt_dma *it,
enum i915_cache_level cache_level,
@@ -388,8 +398,7 @@ static int emit_pte(struct i915_request *rq,
return PTR_ERR(cs);
/* Pack as many PTE updates as possible into a single MI command */
pkt = min_t(int, dword_length, ring->space / sizeof(u32) + 5);
pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
pkt = max_pte_pkt_size(rq, dword_length);
hdr = cs;
*cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
@@ -422,8 +431,7 @@ static int emit_pte(struct i915_request *rq,
}
}
pkt = min_t(int, dword_rem, ring->space / sizeof(u32) + 5);
pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
pkt = max_pte_pkt_size(rq, dword_rem);
hdr = cs;
*cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
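Both emit_pte() call sites above used to clamp pkt against ring->space alone; the new max_pte_pkt_size() helper also subtracts rq->reserved_space, so a large PTE packet can no longer eat the space reserved for the request tail. A self-contained model of the arithmetic (the struct fields mirror the i915 ring bookkeeping, the +5 terms are carried over from the helper verbatim, and the values in main() are made up):

#include <stdio.h>

struct ring_model {
    int size;           /* total ring size, bytes */
    int space;          /* currently free space, bytes */
    int emit;           /* offset of the next emit, bytes */
    int reserved_space; /* bytes reserved for the request tail */
};

static int max_pte_pkt_size(const struct ring_model *r, int pkt)
{
    int by_space = (r->space - r->reserved_space) / 4 + 5;
    int by_wrap  = (r->size - r->emit) / 4 + 5;

    if (pkt > by_space)
        pkt = by_space; /* keep out of the reserved tail space */
    if (pkt > by_wrap)
        pkt = by_wrap;  /* do not run past the ring wrap point */
    return pkt;
}

int main(void)
{
    struct ring_model r = {
        .size = 4096, .space = 512, .emit = 3800, .reserved_space = 160,
    };

    /* clamps 1024 dwords down to 79 with these made-up numbers */
    printf("pkt clamped to %d dwords\n", max_pte_pkt_size(&r, 1024));
    return 0;
}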
@@ -829,14 +837,35 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
/*
* While we can't always restore/manage the CCS state,
* we still need to ensure we don't leak the CCS state
* from the previous user, so make sure we overwrite it
* with something.
*/
err = emit_copy_ccs(rq, dst_offset, INDIRECT_ACCESS,
dst_offset, DIRECT_ACCESS, len);
if (src_is_lmem) {
/*
* If the src is already in lmem, then we must
* be doing an lmem -> lmem transfer, and so
* should be safe to directly copy the CCS
* state. In this case we have either
* initialised the CCS aux state when first
* clearing the pages (since it is already
* allocated in lmem), or the user has
* potentially populated it, in which case we
* need to copy the CCS state as-is.
*/
err = emit_copy_ccs(rq,
dst_offset, INDIRECT_ACCESS,
src_offset, INDIRECT_ACCESS,
len);
} else {
/*
* While we can't always restore/manage the CCS
* state, we still need to ensure we don't leak
* the CCS state from the previous user, so make
* sure we overwrite it with something.
*/
err = emit_copy_ccs(rq,
dst_offset, INDIRECT_ACCESS,
dst_offset, DIRECT_ACCESS,
len);
}
if (err)
goto out_rq;
......
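The new branch picks the CCS source by where the data lives: an lmem -> lmem copy carries the aux state across as-is, while a system-memory source has no CCS, so the destination's aux is rewritten from its own main surface rather than left holding a previous user's state. A compact model of that decision (the enum and functions are illustrative stand-ins for the real emit_copy_ccs() arguments):

#include <stdbool.h>
#include <stdio.h>

enum access_mode { DIRECT_ACCESS, INDIRECT_ACCESS };

static void emit_copy_ccs_model(int dst, enum access_mode dst_mode,
                                int src, enum access_mode src_mode)
{
    printf("ccs copy: dst=0x%x (%s) <- src=0x%x (%s)\n",
           dst, dst_mode == INDIRECT_ACCESS ? "indirect" : "direct",
           src, src_mode == INDIRECT_ACCESS ? "indirect" : "direct");
}

static void copy_ccs(int dst_offset, int src_offset, bool src_is_lmem)
{
    if (src_is_lmem) {
        /* lmem -> lmem: the source aux state is valid, copy it as-is */
        emit_copy_ccs_model(dst_offset, INDIRECT_ACCESS,
                            src_offset, INDIRECT_ACCESS);
    } else {
        /* smem source has no aux state: overwrite the destination CCS
         * from its own main surface so nothing stale leaks through */
        emit_copy_ccs_model(dst_offset, INDIRECT_ACCESS,
                            dst_offset, DIRECT_ACCESS);
    }
}

int main(void)
{
    copy_ccs(0x1000, 0x2000, true);
    copy_ccs(0x1000, 0x2000, false);
    return 0;
}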
@@ -1383,6 +1383,9 @@ static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
u32 *state = ce->lrc_reg_state;
if (drm_WARN_ON(&ce->engine->i915->drm, !state))
return U32_MAX;
for (offset = 0; offset < len; ) {
if (IS_MI_LRI_CMD(state[offset])) {
/*
@@ -1447,7 +1450,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
if (IS_ERR(ce))
return PTR_ERR(ce);
if (engine_supports_mi_query(stream->engine)) {
if (engine_supports_mi_query(stream->engine) &&
HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
/*
* We are enabling perf query here. If we don't find the context
* offset here, just return an error.
......
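oa_context_image_offset() walks the logical ring context image looking for MI_LOAD_REGISTER_IMM packets, which is why the fix both guards against a missing lrc_reg_state and gates the path on HAS_LOGICAL_RING_CONTEXTS(). A toy version of such a scan; the opcode test is simplified relative to the kernel's IS_MI_LRI_CMD, and the image contents are fabricated:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MI_LRI_OPCODE 0x22u
/* header: opcode in bits 28:23, dword count (2 * nregs - 1) in bits 7:0 */
#define MI_LRI(nregs) ((MI_LRI_OPCODE << 23) | (2u * (nregs) - 1u))
#define IS_MI_LRI(dw) ((((dw) >> 23) & 0x3fu) == MI_LRI_OPCODE)

/* Return the offset of the value slot written to @reg, or UINT32_MAX if
 * no LRI packet touches it, mirroring the U32_MAX error return above. */
static uint32_t image_offset(const uint32_t *state, uint32_t len, uint32_t reg)
{
    for (uint32_t offset = 0; offset < len;) {
        if (IS_MI_LRI(state[offset])) {
            uint32_t n = ((state[offset] & 0xffu) + 1) / 2;

            for (uint32_t i = 0; i < n; i++)
                if (state[offset + 1 + 2 * i] == reg)
                    return offset + 2 + 2 * i;
            offset += 1 + 2 * n;
        } else {
            offset++; /* toy model: the real walker handles more packet types */
        }
    }
    return UINT32_MAX;
}

int main(void)
{
    /* fabricated image: a single LRI writing registers 0x2340 and 0x2350 */
    uint32_t state[] = { MI_LRI(2), 0x2340, 0xdead, 0x2350, 0xbeef };

    printf("value slot for 0x2350: %" PRIu32 "\n",
           image_offset(state, 5, 0x2350));
    return 0;
}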
@@ -824,9 +824,9 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
}
/**
* intel_uncore_forcewake_put__locked - grab forcewake domain references
* intel_uncore_forcewake_put__locked - release forcewake domain references
* @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to get reference on
* @fw_domains: forcewake domains to put references
*
* See intel_uncore_forcewake_put(). This variant places the onus
* on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
......
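For reference, the __locked variants whose kernel-doc is fixed here pair up as below (a usage sketch only, assuming a valid uncore pointer; as the documentation says, the caller must hold uncore->lock itself):

/* sketch: caller owns the lock across the whole get/put sequence */
spin_lock_irq(&uncore->lock);

intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
/* ... forcewake-protected register access goes here ... */
intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);

spin_unlock_irq(&uncore->lock);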