Commit 6643b383 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2022-09-29' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Restrict forced preemption to the active context (Chris)
- Restrict perf_limit_reasons to the supported platforms, gen11+ (Ashutosh)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YzXAkH1a32pYJD33@intel.com
parents 91462afa 7738be97
drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -165,6 +165,21 @@ struct intel_engine_execlists {
 	 */
 	struct timer_list preempt;
 
+	/**
+	 * @preempt_target: active request at the time of the preemption request
+	 *
+	 * We force a preemption to occur if the pending contexts have not
+	 * been promoted to active upon receipt of the CS ack event within
+	 * the timeout. This timeout may be chosen based on the target,
+	 * using a very short timeout if the context is no longer schedulable.
+	 * That short timeout may not be applicable to other contexts, so
+	 * if a context switch should happen before the preemption
+	 * timeout, we may shoot early at an innocent context. To prevent this,
+	 * we record which context was active at the time of the preemption
+	 * request and only reset that context upon the timeout.
+	 */
+	const struct i915_request *preempt_target;
+
 	/**
 	 * @ccid: identifier for contexts submitted to this engine
 	 */
...
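For readers outside the kernel tree, the change above boils down to a record-then-compare pattern: remember which request was active when the preemption timer was armed, and at timeout only reset the engine if it is still stuck on that request. Below is a minimal userspace sketch of that pattern; the names (fake_request, engine_model, arm_preemption, timeout_should_reset) are invented for illustration and are not i915 API.

#include <stdbool.h>
#include <stdio.h>

struct fake_request { int id; };

struct engine_model {
        const struct fake_request *active;         /* currently running */
        const struct fake_request *preempt_target; /* recorded at arm time */
};

/* Arm the preemption timer: remember whom we expect to preempt. */
static void arm_preemption(struct engine_model *e)
{
        e->preempt_target = e->active;
}

/*
 * On timeout: only reset if the engine is still on the request we
 * originally targeted; otherwise a context switch already happened
 * and the new (innocent) context deserves its own, fresh timeout.
 */
static bool timeout_should_reset(const struct engine_model *e)
{
        return e->active == e->preempt_target;
}

int main(void)
{
        struct fake_request a = { .id = 1 }, b = { .id = 2 };
        struct engine_model e = { .active = &a };

        arm_preemption(&e);
        printf("no switch -> reset? %d\n", timeout_should_reset(&e)); /* 1 */

        e.active = &b; /* a CS event switched contexts before the timeout */
        printf("switched  -> reset? %d\n", timeout_should_reset(&e)); /* 0 */
        return 0;
}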
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1241,6 +1241,9 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
 	if (!rq)
 		return 0;
 
+	/* Only allow ourselves to force reset the currently active context */
+	engine->execlists.preempt_target = rq;
+
 	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
 	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
 		return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
@@ -2427,8 +2430,24 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
 	GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
 
 	if (unlikely(preempt_timeout(engine))) {
+		const struct i915_request *rq = *engine->execlists.active;
+
+		/*
+		 * If, after the preempt-timeout expired, we are still on the
+		 * same active request/context as before we initiated the
+		 * preemption, reset the engine.
+		 *
+		 * However, if we have processed a CS event to switch contexts,
+		 * but not yet processed the CS event for the pending
+		 * preemption, reset the timer allowing the new context to
+		 * gracefully exit.
+		 */
 		cancel_timer(&engine->execlists.preempt);
-		engine->execlists.error_interrupt |= ERROR_PREEMPT;
+		if (rq == engine->execlists.preempt_target)
+			engine->execlists.error_interrupt |= ERROR_PREEMPT;
+		else
+			set_timer_ms(&engine->execlists.preempt,
+				     active_preempt_timeout(engine, rq));
 	}
 
 	if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
...
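Note what happens when the comparison fails: the tasklet re-arms the timer with active_preempt_timeout(engine, rq), so the newly active context gets a timeout suited to it — very short if the context is banned or bad (ignoring sysfs), otherwise the configured value. A hedged sketch of that selection logic, using invented stand-ins (model_active_preempt_timeout, fake_request.banned) rather than the real i915 helpers:

#include <stdbool.h>
#include <stdio.h>

#define BANNED_PREEMPT_TIMEOUT_MS 1 /* force a fast reset */

struct fake_request {
        bool banned; /* stand-in for intel_context_is_banned()/bad_request() */
};

static unsigned long
model_active_preempt_timeout(const struct fake_request *rq,
                             unsigned long sysfs_timeout_ms)
{
        if (!rq)
                return 0; /* nothing running: no forced preemption needed */

        if (rq->banned)
                return BANNED_PREEMPT_TIMEOUT_MS; /* ignore sysfs, shoot fast */

        return sysfs_timeout_ms; /* normal, user-configurable timeout */
}

int main(void)
{
        struct fake_request ok = { .banned = false }, bad = { .banned = true };

        printf("healthy context: %lums\n", model_active_preempt_timeout(&ok, 640));
        printf("banned context:  %lums\n", model_active_preempt_timeout(&bad, 640));
        return 0;
}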
drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -545,8 +545,7 @@ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
 static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
 static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
 
-static const struct attribute *freq_attrs[] = {
-	&dev_attr_punit_req_freq_mhz.attr,
+static const struct attribute *throttle_reason_attrs[] = {
 	&attr_throttle_reason_status.attr,
 	&attr_throttle_reason_pl1.attr,
 	&attr_throttle_reason_pl2.attr,
@@ -763,11 +762,19 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
 	if (!is_object_gt(kobj))
 		return;
 
-	ret = sysfs_create_files(kobj, freq_attrs);
+	ret = sysfs_create_file(kobj, &dev_attr_punit_req_freq_mhz.attr);
 	if (ret)
 		drm_warn(&gt->i915->drm,
-			 "failed to create gt%u throttle sysfs files (%pe)",
+			 "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
 			 gt->info.id, ERR_PTR(ret));
 
+	if (GRAPHICS_VER(gt->i915) >= 11) {
+		ret = sysfs_create_files(kobj, throttle_reason_attrs);
+		if (ret)
+			drm_warn(&gt->i915->drm,
+				 "failed to create gt%u throttle sysfs files (%pe)",
+				 gt->info.id, ERR_PTR(ret));
+	}
+
 	if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
 		ret = sysfs_create_files(kobj, media_perf_power_attrs);
...
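The practical effect of this second fix is visible from userspace: punit_req_freq_mhz is created unconditionally, while the throttle_reason_* files only appear on gen11+ hardware. A small sketch that probes for both; the /sys/class/drm/card0/gt/gt0 path is an assumption for a typical single-card, single-GT system and may need adjusting.

#include <stdio.h>

static void read_attr(const char *path)
{
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
                /* Expected on pre-gen11 hardware for throttle_reason_* */
                printf("%s: not present\n", path);
                return;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("%s: %s", path, buf);
        fclose(f);
}

int main(void)
{
        read_attr("/sys/class/drm/card0/gt/gt0/punit_req_freq_mhz");
        read_attr("/sys/class/drm/card0/gt/gt0/throttle_reason_status");
        return 0;
}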