Commit 4050e6f2 authored by Jani Nikula

drm/i915/gt: remove some limited use register access wrappers

Remove rmw_set(), rmw_clear(), clear_register(), rmw_set_fw(), and
rmw_clear_fw(). They're one level of abstraction too many for register
access, each serving a very specific purpose.

clear_register() seems like a micro-optimization bypassing the write
when the register is already clear, but that trick has ceased to work
since commit 06b975d5 ("drm/i915: make intel_uncore_rmw() write
unconditionally"). Just clear the register in the most obvious way.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221123164916.4128733-1-jani.nikula@intel.com
parent 3d0f98fa
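
For reference, a minimal sketch of what each removed wrapper expanded to; the
mappings are taken directly from the hunks below. The function name
rmw_wrapper_examples() is made up for illustration, and the snippet assumes the
usual i915 uncore declarations (struct intel_uncore, i915_reg_t, the
intel_uncore_* helpers) rather than being a standalone translation unit:

/* Illustration only: the expansions applied throughout this patch. */
static void rmw_wrapper_examples(struct intel_uncore *uncore,
                                 i915_reg_t reg, u32 bits)
{
        intel_uncore_rmw(uncore, reg, 0, bits);         /* was rmw_set() */
        intel_uncore_rmw(uncore, reg, bits, 0);         /* was rmw_clear() */
        intel_uncore_write(uncore, reg, 0);             /* was clear_register() */

        /* The forcewake-held variants follow the same pattern: */
        intel_uncore_rmw_fw(uncore, reg, 0, bits);      /* was rmw_set_fw() */
        intel_uncore_rmw_fw(uncore, reg, bits, 0);      /* was rmw_clear_fw() */
}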
@@ -218,21 +218,6 @@ int intel_gt_init_hw(struct intel_gt *gt)
 	return ret;
 }
 
-static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
-	intel_uncore_rmw(uncore, reg, 0, set);
-}
-
-static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
-	intel_uncore_rmw(uncore, reg, clr, 0);
-}
-
-static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
-{
-	intel_uncore_rmw(uncore, reg, 0, 0);
-}
-
 static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
 {
 	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
@@ -258,14 +243,14 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 	u32 eir;
 
 	if (GRAPHICS_VER(i915) != 2)
-		clear_register(uncore, PGTBL_ER);
+		intel_uncore_write(uncore, PGTBL_ER, 0);
 
 	if (GRAPHICS_VER(i915) < 4)
-		clear_register(uncore, IPEIR(RENDER_RING_BASE));
+		intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
 	else
-		clear_register(uncore, IPEIR_I965);
+		intel_uncore_write(uncore, IPEIR_I965, 0);
 
-	clear_register(uncore, EIR);
+	intel_uncore_write(uncore, EIR, 0);
 	eir = intel_uncore_read(uncore, EIR);
 	if (eir) {
 		/*
@@ -273,7 +258,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 		 * mask them.
 		 */
 		drm_dbg(&gt->i915->drm, "EIR stuck: 0x%08x, masking\n", eir);
-		rmw_set(uncore, EMR, eir);
+		intel_uncore_rmw(uncore, EMR, 0, eir);
 		intel_uncore_write(uncore, GEN2_IIR,
 				   I915_MASTER_ERROR_INTERRUPT);
 	}
@@ -283,10 +268,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 					   RING_FAULT_VALID, 0);
 		intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
 	} else if (GRAPHICS_VER(i915) >= 12) {
-		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
+		intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
 		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
 	} else if (GRAPHICS_VER(i915) >= 8) {
-		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+		intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
 		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
 	} else if (GRAPHICS_VER(i915) >= 6) {
 		struct intel_engine_cs *engine;
@@ -35,16 +35,6 @@
 /* XXX How to handle concurrent GGTT updates using tiling registers? */
 #define RESET_UNDER_STOP_MACHINE 0
 
-static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
-	intel_uncore_rmw_fw(uncore, reg, 0, set);
-}
-
-static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
-	intel_uncore_rmw_fw(uncore, reg, clr, 0);
-}
-
 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
 {
 	struct drm_i915_file_private *file_priv = ctx->file_priv;
@@ -212,7 +202,7 @@ static int g4x_do_reset(struct intel_gt *gt,
 	int ret;
 
 	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
-	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+	intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
 	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
 
 	pci_write_config_byte(pdev, I915_GDRST,
...@@ -234,7 +224,7 @@ static int g4x_do_reset(struct intel_gt *gt, ...@@ -234,7 +224,7 @@ static int g4x_do_reset(struct intel_gt *gt,
out: out:
pci_write_config_byte(pdev, I915_GDRST, 0); pci_write_config_byte(pdev, I915_GDRST, 0);
rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE); intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D); intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
return ret; return ret;
@@ -448,7 +438,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine,
 	 * to reset it as well (we will unlock it once the reset sequence is
 	 * completed).
 	 */
-	rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
+	intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);
 
 	ret = __intel_wait_for_register_fw(uncore,
 					   sfc_lock.ack_reg,
@@ -498,7 +488,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
 
 	get_sfc_forced_lock_data(engine, &sfc_lock);
 
-	rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
+	intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
 }
 
 static int __gen11_reset_engines(struct intel_gt *gt,
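
As a side note, here is a tiny standalone illustration (plain C, not kernel
code; the helper name and example values are made up) of the clear-then-set
semantics implied by the removed wrappers: rmw_clear()/rmw_clear_fw() passed
their mask as the "clear" argument, rmw_set()/rmw_set_fw() passed it as "set".

#include <stdint.h>
#include <stdio.h>

/* Clear the "clear" bits first, then OR in the "set" bits. */
static uint32_t rmw_value(uint32_t old, uint32_t clear, uint32_t set)
{
        return (old & ~clear) | set;
}

int main(void)
{
        uint32_t reg = 0x0000000f;

        printf("set 0x10:   0x%08x\n", rmw_value(reg, 0, 0x10));  /* 0x0000001f */
        printf("clear 0x01: 0x%08x\n", rmw_value(reg, 0x01, 0));  /* 0x0000000e */

        return 0;
}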