Commit fb5c551a authored by Chris Wilson

drm/i915: Remove i915.enable_execlists module parameter

Execlists and legacy ringbuffer submission are no longer feature
comparable (execlists now offer greater functionality that should
overcome their performance hit), which obsoletes the unsafe module
parameter: comparing the two modes of execution is no longer useful,
so remove the debug tool.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com> #i915_perf.c
Link: https://patchwork.freedesktop.org/patch/msgid/20171120205504.21892-1-chris@chris-wilson.co.uk
parent ba74cb10
@@ -294,8 +294,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 	 * write.
 	 */
 	if (mmio->in_context &&
-	    ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
-	    i915_modparams.enable_execlists)
+	    (ctx_ctrl & inhibit_mask) != inhibit_mask)
 		continue;

 	if (mmio->mask)
@@ -1989,75 +1989,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
 	return 0;
 }

-static void i915_dump_lrc_obj(struct seq_file *m,
-			      struct i915_gem_context *ctx,
-			      struct intel_engine_cs *engine)
-{
-	struct i915_vma *vma = ctx->engine[engine->id].state;
-	struct page *page;
-	int j;
-
-	seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
-
-	if (!vma) {
-		seq_puts(m, "\tFake context\n");
-		return;
-	}
-
-	if (vma->flags & I915_VMA_GLOBAL_BIND)
-		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
-			   i915_ggtt_offset(vma));
-
-	if (i915_gem_object_pin_pages(vma->obj)) {
-		seq_puts(m, "\tFailed to get pages for context object\n\n");
-		return;
-	}
-
-	page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
-	if (page) {
-		u32 *reg_state = kmap_atomic(page);
-
-		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
-			seq_printf(m,
-				   "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
-				   j * 4,
-				   reg_state[j], reg_state[j + 1],
-				   reg_state[j + 2], reg_state[j + 3]);
-		}
-		kunmap_atomic(reg_state);
-	}
-
-	i915_gem_object_unpin_pages(vma->obj);
-	seq_putc(m, '\n');
-}
-
-static int i915_dump_lrc(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
-	enum intel_engine_id id;
-	int ret;
-
-	if (!i915_modparams.enable_execlists) {
-		seq_printf(m, "Logical Ring Contexts are disabled\n");
-		return 0;
-	}
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	list_for_each_entry(ctx, &dev_priv->contexts.list, link)
-		for_each_engine(engine, dev_priv, id)
-			i915_dump_lrc_obj(m, ctx, engine);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static const char *swizzle_string(unsigned swizzle)
 {
 	switch (swizzle) {
@@ -4833,7 +4764,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_vbt", i915_vbt, 0},
 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
 	{"i915_context_status", i915_context_status, 0},
-	{"i915_dump_lrc", i915_dump_lrc, 0},
 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
 	{"i915_swizzle_info", i915_swizzle_info, 0},
 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -371,9 +371,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) {
 			value |= I915_SCHEDULER_CAP_ENABLED;
 			value |= I915_SCHEDULER_CAP_PRIORITY;

-			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
-			    i915_modparams.enable_execlists)
+			if (HAS_LOGICAL_RING_PREEMPTION(dev_priv))
 				value |= I915_SCHEDULER_CAP_PREEMPTION;
 		}
 		break;
@@ -1054,10 +1052,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
-	i915_modparams.enable_execlists =
-		intel_sanitize_enable_execlists(dev_priv,
-						i915_modparams.enable_execlists);
-
 	/*
 	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
 	 * user's requested state against the hardware/driver capabilities. We
@@ -3153,6 +3153,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 		((dev_priv)->info.has_logical_ring_contexts)
 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
 		((dev_priv)->info.has_logical_ring_preemption)
+
+#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
+
 #define USES_PPGTT(dev_priv)		(i915_modparams.enable_ppgtt)
 #define USES_FULL_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt >= 2)
 #define USES_FULL_48BIT_PPGTT(dev_priv)	(i915_modparams.enable_ppgtt == 3)
@@ -5003,7 +5003,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
 		return false;

 	/* TODO: make semaphores and Execlists play nicely together */
-	if (i915_modparams.enable_execlists)
+	if (HAS_EXECLISTS(dev_priv))
 		return false;

 	if (value >= 0)
@@ -5147,12 +5147,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

-	if (!i915_modparams.enable_execlists) {
-		dev_priv->gt.resume = intel_legacy_submission_resume;
-		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
-	} else {
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		dev_priv->gt.resume = intel_lr_context_resume;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+	} else {
+		dev_priv->gt.resume = intel_legacy_submission_resume;
+		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
 	}

 	/* This is just a security blanket to placate dragons.
@@ -460,14 +460,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
 	init_llist_head(&dev_priv->contexts.free_list);

-	if (intel_vgpu_active(dev_priv) &&
-	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-		if (!i915_modparams.enable_execlists) {
-			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
-			return -EINVAL;
-		}
-	}
-
 	/* Using the simple ida interface, the max is limited by sizeof(int) */
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
 	ida_init(&dev_priv->contexts.hw_ida);
@@ -842,7 +834,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;

 	lockdep_assert_held(&req->i915->drm.struct_mutex);
-	GEM_BUG_ON(i915_modparams.enable_execlists);
+	GEM_BUG_ON(HAS_EXECLISTS(req->i915));

 	if (!req->ctx->engine[engine->id].state) {
 		struct i915_gem_context *to = req->ctx;
@@ -178,7 +178,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 		return 0;
 	}

-	if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		if (has_full_48bit_ppgtt)
 			return 3;
@@ -2162,7 +2162,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
 	 * and the PDPs are contained within the context itself. We don't
 	 * need to do anything here. */
-	if (i915_modparams.enable_execlists)
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
 		return 0;

 	if (!USES_PPGTT(dev_priv))
@@ -99,10 +99,6 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
 	"Override PPGTT usage. "
 	"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");

-i915_param_named_unsafe(enable_execlists, int, 0400,
-	"Override execlists usage. "
-	"(-1=auto [default], 0=disabled, 1=enabled)");
-
 i915_param_named_unsafe(enable_psr, int, 0600,
 	"Enable PSR "
 	"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
@@ -39,7 +39,6 @@
 	param(int, enable_dc, -1) \
 	param(int, enable_fbc, -1) \
 	param(int, enable_ppgtt, -1) \
-	param(int, enable_execlists, -1) \
 	param(int, enable_psr, -1) \
 	param(int, disable_power_well, -1) \
 	param(int, enable_ips, 1) \
@@ -1216,9 +1216,9 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;

-	if (i915_modparams.enable_execlists)
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
-	else {
+	} else {
 		struct intel_engine_cs *engine = dev_priv->engine[RCS];
 		struct intel_ring *ring;
 		int ret;
@@ -1262,7 +1262,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;

-	if (i915_modparams.enable_execlists) {
+	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
 	} else {
 		struct intel_engine_cs *engine = dev_priv->engine[RCS];
@@ -3439,7 +3439,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
 			gen7_oa_hw_tail_read;

 		dev_priv->perf.oa.oa_formats = hsw_oa_formats;
-	} else if (i915_modparams.enable_execlists) {
+	} else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
 		/* Note: that although we could theoretically also support the
 		 * legacy ringbuffer mode on BDW (and earlier iterations of
 		 * this driver, before upstreaming did this) it didn't seem
@@ -164,9 +164,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
 	case 9:
 		return GEN9_LR_CONTEXT_RENDER_SIZE;
 	case 8:
-		return i915_modparams.enable_execlists ?
-		       GEN8_LR_CONTEXT_RENDER_SIZE :
-		       GEN8_CXT_TOTAL_SIZE;
+		return GEN8_LR_CONTEXT_RENDER_SIZE;
 	case 7:
 		if (IS_HASWELL(dev_priv))
 			return HSW_CXT_TOTAL_SIZE;
@@ -316,7 +314,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
 			&intel_engine_classes[engine->class];
 		int (*init)(struct intel_engine_cs *engine);

-		if (i915_modparams.enable_execlists)
+		if (HAS_EXECLISTS(dev_priv))
 			init = class_info->init_execlists;
 		else
 			init = class_info->init_legacy;
@@ -1739,7 +1737,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
 		drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
 			   upper_32_bits(addr), lower_32_bits(addr));

-	if (i915_modparams.enable_execlists) {
+	if (HAS_EXECLISTS(dev_priv)) {
 		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
 		u32 ptr, read, write;
 		unsigned int idx;
@@ -95,11 +95,6 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 		return 0;
 	}

-	if (!i915_modparams.enable_execlists) {
-		DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");
-		return -EIO;
-	}
-
 	if (i915_modparams.enable_guc_submission) {
 		DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
 		return -EIO;
@@ -218,37 +218,6 @@ static void execlists_init_reg_state(u32 *reg_state,
 				     struct intel_engine_cs *engine,
 				     struct intel_ring *ring);

-/**
- * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
- * @dev_priv: i915 device private
- * @enable_execlists: value of i915.enable_execlists module parameter.
- *
- * Only certain platforms support Execlists (the prerequisites being
- * support for Logical Ring Contexts and Aliasing PPGTT or better).
- *
- * Return: 1 if Execlists is supported and has to be enabled.
- */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
-{
-	/* On platforms with execlist available, vGPU will only
-	 * support execlist mode, no ring buffer mode.
-	 */
-	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
-		return 1;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		return 1;
-
-	if (enable_execlists == 0)
-		return 0;
-
-	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
-	    USES_PPGTT(dev_priv))
-		return 1;
-
-	return 0;
-}
-
 /**
  * intel_lr_context_descriptor_update() - calculate & cache the descriptor
  * descriptor for a pinned context
@@ -107,8 +107,4 @@ intel_lr_context_descriptor(struct i915_gem_context *ctx,
 	return ctx->engine[engine->id].lrc_desc;
 }

-/* Execlists */
-int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
-				    int enable_execlists);
-
 #endif /* _INTEL_LRC_H_ */