Commit effc0905 authored by Matt Roper, committed by Jani Nikula

drm/i915/pvc: Annotate two more workaround/tuning registers as MCR

XEHPC_LNCFMISCCFGREG0 and XEHPC_L3SCRUB are both in MCR register ranges
on PVC (with HALFBSLICE and L3BANK replication respectively), so they
should be explicitly declared as MCR registers and use MCR-aware
workaround handlers.

The workarounds/tuning settings should still be applied properly on PVC
even without the MCR annotation, but readback verification on
CONFIG_DRM_I915_DEBUG_GEM builds could potentially give false-positive
"workaround lost on load" warnings on parts fused such that a unicast
read targets a terminated register instance.
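
As an illustration only (not part of the patch), a minimal sketch of the
pattern, using a hypothetical register name and offset: declaring a register
with MCR_REG() instead of _MMIO() gives it the i915_mcr_reg_t type, which in
turn requires the MCR-aware wa_mcr_*() helpers, so readback verification is
steered to a valid, non-terminated register instance.

    /* Hypothetical register for illustration; not a real PVC offset */
    #define EXAMPLE_TUNING_REG	MCR_REG(0xb000)	/* plain registers would use _MMIO() */
    #define EXAMPLE_TUNING_BIT	REG_BIT(0)

    static void example_tuning(struct i915_wa_list *wal)
    {
    	/* i915_mcr_reg_t registers must go through the wa_mcr_*() variants */
    	wa_mcr_masked_en(wal, EXAMPLE_TUNING_REG, EXAMPLE_TUNING_BIT);
    }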

Fixes: a9e69428 ("drm/i915: Define MCR registers explicitly")
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Gustavo Sousa <gustavo.sousa@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230201222831.608281-1-matthew.d.roper@intel.com
(cherry picked from commit 4039e442)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent eb66553d
drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -979,7 +979,7 @@
 #define GEN7_WA_FOR_GEN7_L3_CONTROL	0x3C47FF8C
 #define GEN7_L3AGDIS			(1 << 19)
 
-#define XEHPC_LNCFMISCCFGREG0		_MMIO(0xb01c)
+#define XEHPC_LNCFMISCCFGREG0		MCR_REG(0xb01c)
 #define   XEHPC_HOSTCACHEEN		REG_BIT(1)
 #define   XEHPC_OVRLSCCC		REG_BIT(0)
@@ -1042,7 +1042,7 @@
 #define XEHP_L3SCQREG7			MCR_REG(0xb188)
 #define   BLEND_FILL_CACHING_OPT_DIS	REG_BIT(3)
 
-#define XEHPC_L3SCRUB			_MMIO(0xb18c)
+#define XEHPC_L3SCRUB			MCR_REG(0xb18c)
 #define   SCRUB_CL_DWNGRADE_SHARED	REG_BIT(12)
 #define   SCRUB_RATE_PER_BANK_MASK	REG_GENMASK(2, 0)
 #define   SCRUB_RATE_4B_PER_CLK	REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -240,6 +240,12 @@ wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
 	wa_write_clr_set(wal, reg, ~0, set);
 }
 
+static void
+wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
+{
+	wa_mcr_write_clr_set(wal, reg, ~0, set);
+}
+
 static void
 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
 {
@@ -2970,9 +2976,9 @@ add_render_compute_tuning_settings(struct drm_i915_private *i915,
 					   struct i915_wa_list *wal)
 {
 	if (IS_PONTEVECCHIO(i915)) {
-		wa_write(wal, XEHPC_L3SCRUB,
-			 SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
-		wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
+		wa_mcr_write(wal, XEHPC_L3SCRUB,
+			     SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+		wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
 	}
 
 	if (IS_DG2(i915)) {
@@ -3062,7 +3068,7 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
 	if (IS_PONTEVECCHIO(i915)) {
 		/* Wa_16016694945 */
-		wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
+		wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
 	}
 
 	if (IS_XEHPSDV(i915)) {