Commit e87a005d authored by Jani Nikula

drm/i915: add helpers for platform specific revision id range checks

Revision checks are almost always accompanied by a platform check. (The
exceptions are in platform specific code.) Add helpers that check for a
platform and a revision range in one go: IS_SKL_REVID() and
IS_BXT_REVID(). In most places this simplifies and clarifies the code,
and it makes it obvious that the revid macros are used for the correct
platform.

This should make it easier to find all the revision checks for
workarounds for each platform, and make it easier to remove them once we
drop support for early hardware revisions.

This should also make it easier to differentiate between Skylake and
Kabylake revision checks when Kabylake support is added.

v2: rebase
Acked-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1445343722-3312-3-git-send-email-jani.nikula@intel.com
parent fffda3f4
@@ -2433,6 +2433,15 @@ struct drm_i915_cmd_table {
 #define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
 #define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)
 
+#define REVID_FOREVER	0xff
+/*
+ * Return true if revision is in range [since,until] inclusive.
+ *
+ * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
+ */
+#define IS_REVID(p, since, until) \
+	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
+
 #define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
 #define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
 #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
@@ -2501,11 +2510,15 @@ struct drm_i915_cmd_table {
 #define SKL_REVID_E0		0x4
 #define SKL_REVID_F0		0x5
 
+#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
+
 #define BXT_REVID_A0		0x0
 #define BXT_REVID_A1		0x1
 #define BXT_REVID_B0		0x3
 #define BXT_REVID_C0		0x9
 
+#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks
......
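
Editor's note: the helpers above are easy to exercise in isolation. Below is a
minimal standalone sketch (not part of the commit): the fake_dev struct and the
stubbed INTEL_REVID()/IS_SKYLAKE() are hypothetical test scaffolding, and the
SKL_REVID_C0/SKL_REVID_D0 values are quoted from the same header region rather
than from the hunks shown here; the macro bodies are verbatim from the diff. It
demonstrates the three idioms the rest of the diff relies on: a closed range, an
exact revision written as [rev, rev], and an open-ended range via REVID_FOREVER.

#include <assert.h>
#include <stdbool.h>

struct fake_dev {
	unsigned char revision;		/* stands in for the PCI revision id */
	bool is_skylake;
};

/* stubs standing in for the real driver macros */
#define INTEL_REVID(p)	((p)->revision)
#define IS_SKYLAKE(p)	((p)->is_skylake)

/* verbatim from the diff above */
#define REVID_FOREVER	0xff
#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))

#define SKL_REVID_C0	0x2
#define SKL_REVID_D0	0x3

int main(void)
{
	struct fake_dev skl_c0 = { SKL_REVID_C0, true };
	struct fake_dev bxt_a0 = { 0x0, false };

	/* closed range: "INTEL_REVID(dev) <= SKL_REVID_D0" becomes [0, D0] */
	assert(IS_SKL_REVID(&skl_c0, 0, SKL_REVID_D0));
	/* exact revision: "== SKL_REVID_C0" becomes the range [C0, C0] */
	assert(IS_SKL_REVID(&skl_c0, SKL_REVID_C0, SKL_REVID_C0));
	/* open-ended ">= SKL_REVID_C0" becomes [C0, REVID_FOREVER] */
	assert(IS_SKL_REVID(&skl_c0, SKL_REVID_C0, REVID_FOREVER));
	/* the platform check is built in: a non-Skylake device never matches */
	assert(!IS_SKL_REVID(&bxt_a0, 0, REVID_FOREVER));

	return 0;
}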
@@ -3826,7 +3826,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	 * cacheline, whereas normally such cachelines would get
 	 * invalidated.
 	 */
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A1)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		return -ENODEV;
 
 	level = I915_CACHE_LLC;
......
@@ -161,9 +161,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
 	data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
 	/* WaRsDisableCoarsePowerGating:skl,bxt */
 	if (!intel_enable_rc6(dev_priv->dev) ||
-	    (IS_BROXTON(dev) && (INTEL_REVID(dev) <= BXT_REVID_A1)) ||
-	    (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
-	    (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
+	    (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
+	    (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
 		data[1] = 0;
 	else
 		/* bit 0 and 1 are for Render and Media domain separately */
......
@@ -3247,8 +3247,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
 	 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
 	 * interrupts to check the external panel connection.
 	 */
-	if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) <= BXT_REVID_A1)
-	    && port == PORT_B)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
 		dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
 	else
 		dev_priv->hotplug.irq_port[port] = intel_dig_port;
......
@@ -1192,7 +1192,7 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
 {
 	/* WaDisableHBR2:skl */
-	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
 		return false;
 
 	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
@@ -6087,7 +6087,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 		break;
 	case PORT_B:
 		intel_encoder->hpd_pin = HPD_PORT_B;
-		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) <= BXT_REVID_A1))
+		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 			intel_encoder->hpd_pin = HPD_PORT_A;
 		break;
 	case PORT_C:
......
@@ -322,8 +322,8 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
 
 	/* WaDisableMinuteIaClockGating:skl,bxt */
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A0)) {
 		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
 					      ~GUC_ENABLE_MIA_CLOCK_GATING));
 	}
......
@@ -2039,7 +2039,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
 	 * interrupts to check the external panel connection.
 	 */
-	if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) <= BXT_REVID_A1))
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
 		intel_encoder->hpd_pin = HPD_PORT_A;
 	else
 		intel_encoder->hpd_pin = HPD_PORT_B;
......
@@ -284,8 +284,8 @@ static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 
-	return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-		(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+	return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+		IS_BXT_REVID(dev, 0, BXT_REVID_A0)) &&
 	       (ring->id == VCS || ring->id == VCS2);
 }
@@ -1147,7 +1147,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
 	 * this batch updates GEN8_L3SQCREG4 with default value we need to
 	 * set this bit here to retain the WA during flush.
 	 */
-	if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
+	if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1312,8 +1312,8 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
-	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A0))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1338,8 +1338,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_B0)) ||
-	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0))) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A0)) {
 		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 		wa_ctx_emit(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
 		wa_ctx_emit(batch, index,
@@ -1348,8 +1348,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
 	}
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) <= SKL_REVID_D0)) ||
-	    (IS_BROXTON(dev) && (INTEL_REVID(dev) == BXT_REVID_A0)))
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A0))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
 	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1946,7 +1946,7 @@ static int logical_render_ring_init(struct drm_device *dev)
 	ring->init_hw = gen8_init_render_ring;
 	ring->init_context = gen8_init_rcs_context;
 	ring->cleanup = intel_fini_pipe_control;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A1) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -1998,7 +1998,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A1) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2053,7 +2053,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A1) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
@@ -2083,7 +2083,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 		GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
 	ring->init_hw = gen8_init_common_ring;
-	if (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A1) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		ring->get_seqno = bxt_a_get_seqno;
 		ring->set_seqno = bxt_a_set_seqno;
 	} else {
......
@@ -4386,7 +4386,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
-	if (IS_BROXTON(dev) && (INTEL_REVID(dev) <= BXT_REVID_A1))
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		return;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4710,7 +4710,7 @@ static void gen9_enable_rps(struct drm_device *dev)
 	gen6_init_rps_frequencies(dev);
 
 	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
-	if (IS_BROXTON(dev) && (INTEL_REVID(dev) <= BXT_REVID_A1)) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 		return;
 	}
@@ -4755,7 +4755,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
 	if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
-				 (INTEL_REVID(dev) <= SKL_REVID_E0)))
+				 IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
 	else
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4779,8 +4779,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
 			"on" : "off");
 	/* WaRsUseTimeoutMode */
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A0)) {
 		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
 		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
 			   GEN7_RC_CTL_TO_MODE |
@@ -4796,8 +4796,9 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
 	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
 	 */
-	if ((IS_BROXTON(dev) && (INTEL_REVID(dev) <= BXT_REVID_A1)) ||
-	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
+	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
+	     IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
 		I915_WRITE(GEN9_PG_ENABLE, 0);
 	else
 		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
......
@@ -922,17 +922,15 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
-				 INTEL_REVID(dev) == SKL_REVID_B0)) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A1)) {
-		/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 				  GEN9_DG_MIRROR_FIX_ENABLE);
-	}
 
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A1)) {
-		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
 		/*
@@ -942,12 +940,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 		 */
 	}
 
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
-	    IS_BROXTON(dev)) {
-		/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
 		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
 				  GEN9_ENABLE_YV12_BUGFIX);
-	}
 
 	/* Wa4x4STCOptimizationDisable:skl,bxt */
 	/* WaDisablePartialResolveInVc:skl,bxt */
@@ -959,24 +955,22 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
 
 	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A1))
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
 				  PIXEL_MASK_CAMMING_DISABLE);
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-	if (IS_SKYLAKE(dev) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
+	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
-	}
 
 	/* WaDisableSTUnitPowerOptimization:skl,bxt */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
@@ -1036,7 +1030,7 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	if (ret)
 		return ret;
 
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
 		/* WaDisableHDCInvalidation:skl */
 		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
 			   BDW_DISABLE_HDC_INVALIDATION);
@@ -1049,23 +1043,23 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
 	 * involving this register should also be added to WA batch as required.
 	 */
-	if (INTEL_REVID(dev) <= SKL_REVID_E0)
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
 		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
 					   GEN9_GAPS_TSV_CREDIT_DISABLE));
 	}
 
 	/* WaDisablePowerCompilerClockGating:skl */
-	if (INTEL_REVID(dev) == SKL_REVID_B0)
+	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -1076,19 +1070,17 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 			   HDC_FORCE_NON_COHERENT);
 	}
 
-	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-	    INTEL_REVID(dev) == SKL_REVID_D0)
-		/* WaBarrierPerformanceFixDisable:skl */
+	/* WaBarrierPerformanceFixDisable:skl */
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
 				  HDC_FENCE_DEST_SLM_DISABLE |
 				  HDC_BARRIER_PERFORMANCE_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:skl */
-	if (INTEL_REVID(dev) <= SKL_REVID_F0) {
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-	}
 
 	return skl_tune_iz_hashing(ring);
 }
@@ -1105,11 +1097,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
 
 	/* WaStoreMultiplePTEenable:bxt */
 	/* This is a requirement according to Hardware specification */
-	if (INTEL_REVID(dev) == BXT_REVID_A0)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A0))
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
 	/* WaSetClckGatingDisableMedia:bxt */
-	if (INTEL_REVID(dev) == BXT_REVID_A0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A0)) {
 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
 					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
 	}
@@ -1119,7 +1111,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
 		  STALL_DOP_GATING_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
-	if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
 		WA_SET_BIT_MASKED(
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
......