Commit f61eae18 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Take engine PM when a context is pinned with GuC submission

Take a PM reference to prevent intel_gt_wait_for_idle from short
circuiting while any user context has scheduling enabled. Reporting the
GT as idle when it is not can cause all sorts of issues throughout the
stack.
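
In outline: every successful pin of a user context now takes an engine
PM reference, and the corresponding unpin drops it asynchronously, so
the GT wakeref stays elevated while any such context remains pinned.
A minimal sketch of the pattern (the example_* and backend_* names are
hypothetical, not the driver code):

/* Hold an engine PM reference for as long as a user context is pinned,
 * so intel_gt_wait_for_idle() cannot observe an idle GT too early. */
static int example_context_pin(struct intel_context *ce)
{
	int err = backend_pin(ce);	/* hypothetical backend pin */

	if (!err)
		intel_engine_pm_get(ce->engine);
	return err;
}

static void example_context_unpin(struct intel_context *ce)
{
	backend_unpin(ce);		/* hypothetical backend unpin */
	/* async put: unpin can run in contexts that must not sleep */
	intel_engine_pm_put_async(ce->engine);
}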

v2:
 (Daniel Vetter)
  - Add might_lock annotations to the pin / unpin functions (see the
    might_lock sketch after this changelog)
v3:
 (CI)
  - Drop intel_engine_pm_might_put from unpin path as an async put is
    used
v4:
 (John Harrison)
  - Make intel_engine_pm_might_get/put work with GuC virtual engines
  - Update commit message
v5:
  - Update commit message again
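
The might_lock annotations added in v2 teach lockdep about the wakeref
mutex that a pin or unpin may take, so misordered locking is reported on
every run, not only on the rare pin that actually transitions the
wakeref. A generic sketch of the idiom (assumed get_resource example,
not the driver code):

/* Annotate before the fast path: lockdep records that this function
 * can take the mutex even when the fast path skips it. */
static void get_resource(struct mutex *lock, atomic_t *count)
{
	might_lock(lock);

	if (atomic_add_unless(count, 1, 0))
		return;			/* fast path: lock not taken */

	mutex_lock(lock);		/* slow path: first reference */
	atomic_inc(count);
	mutex_unlock(lock);
}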
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211014172005.27155-4-matthew.brost@intel.com
parent 1a52faed
@@ -240,6 +240,8 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
 	if (err)
 		goto err_post_unpin;
 
+	intel_engine_pm_might_get(ce->engine);
+
 	if (unlikely(intel_context_is_closed(ce))) {
 		err = -ENOENT;
 		goto err_unlock;
@@ -6,9 +6,11 @@
 #ifndef INTEL_ENGINE_PM_H
 #define INTEL_ENGINE_PM_H
 
+#include "i915_drv.h"
 #include "i915_request.h"
 #include "intel_engine_types.h"
 #include "intel_wakeref.h"
+#include "intel_gt_pm.h"
 
 static inline bool
 intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
@@ -31,6 +33,21 @@ static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
 	return intel_wakeref_get_if_active(&engine->wakeref);
 }
 
+static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
+{
+	if (!intel_engine_is_virtual(engine)) {
+		intel_wakeref_might_get(&engine->wakeref);
+	} else {
+		struct intel_gt *gt = engine->gt;
+		struct intel_engine_cs *tengine;
+		intel_engine_mask_t tmp, mask = engine->mask;
+
+		for_each_engine_masked(tengine, gt, mask, tmp)
+			intel_wakeref_might_get(&tengine->wakeref);
+	}
+	intel_gt_pm_might_get(engine->gt);
+}
+
 static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
 {
 	intel_wakeref_put(&engine->wakeref);
@@ -52,6 +69,21 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
 	intel_wakeref_unlock_wait(&engine->wakeref);
 }
 
+static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
+{
+	if (!intel_engine_is_virtual(engine)) {
+		intel_wakeref_might_put(&engine->wakeref);
+	} else {
+		struct intel_gt *gt = engine->gt;
+		struct intel_engine_cs *tengine;
+		intel_engine_mask_t tmp, mask = engine->mask;
+
+		for_each_engine_masked(tengine, gt, mask, tmp)
+			intel_wakeref_might_put(&tengine->wakeref);
+	}
+	intel_gt_pm_might_put(engine->gt);
+}
+
 static inline struct i915_request *
 intel_engine_create_kernel_request(struct intel_engine_cs *engine)
 {
@@ -31,6 +31,11 @@ static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
 	return intel_wakeref_get_if_active(&gt->wakeref);
 }
 
+static inline void intel_gt_pm_might_get(struct intel_gt *gt)
+{
+	intel_wakeref_might_get(&gt->wakeref);
+}
+
 static inline void intel_gt_pm_put(struct intel_gt *gt)
 {
 	intel_wakeref_put(&gt->wakeref);
@@ -41,6 +46,11 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt)
 	intel_wakeref_put_async(&gt->wakeref);
 }
 
+static inline void intel_gt_pm_might_put(struct intel_gt *gt)
+{
+	intel_wakeref_might_put(&gt->wakeref);
+}
+
 #define with_intel_gt_pm(gt, tmp) \
 	for (tmp = 1, intel_gt_pm_get(gt); tmp; \
 	     intel_gt_pm_put(gt), tmp = 0)
@@ -1571,7 +1571,12 @@ static int guc_context_pre_pin(struct intel_context *ce,
 
 static int guc_context_pin(struct intel_context *ce, void *vaddr)
 {
-	return __guc_context_pin(ce, ce->engine, vaddr);
+	int ret = __guc_context_pin(ce, ce->engine, vaddr);
+
+	if (likely(!ret && !intel_context_is_barrier(ce)))
+		intel_engine_pm_get(ce->engine);
+
+	return ret;
 }
 
 static void guc_context_unpin(struct intel_context *ce)
@@ -1580,6 +1585,9 @@ static void guc_context_unpin(struct intel_context *ce)
 
 	unpin_guc_id(guc, ce);
 	lrc_unpin(ce);
+
+	if (likely(!intel_context_is_barrier(ce)))
+		intel_engine_pm_put_async(ce->engine);
 }
 
 static void guc_context_post_unpin(struct intel_context *ce)
@@ -2341,8 +2349,30 @@ static int guc_virtual_context_pre_pin(struct intel_context *ce,
 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
 {
 	struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+	int ret = __guc_context_pin(ce, engine, vaddr);
+	intel_engine_mask_t tmp, mask = ce->engine->mask;
+
+	if (likely(!ret))
+		for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+			intel_engine_pm_get(engine);
 
-	return __guc_context_pin(ce, engine, vaddr);
+	return ret;
+}
+
+static void guc_virtual_context_unpin(struct intel_context *ce)
+{
+	intel_engine_mask_t tmp, mask = ce->engine->mask;
+	struct intel_engine_cs *engine;
+	struct intel_guc *guc = ce_to_guc(ce);
+
+	GEM_BUG_ON(context_enabled(ce));
+	GEM_BUG_ON(intel_context_is_barrier(ce));
+
+	unpin_guc_id(guc, ce);
+	lrc_unpin(ce);
+
+	for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+		intel_engine_pm_put_async(engine);
 }
 
 static void guc_virtual_context_enter(struct intel_context *ce)
@@ -2379,7 +2409,7 @@ static const struct intel_context_ops virtual_guc_context_ops = {
 
 	.pre_pin = guc_virtual_context_pre_pin,
 	.pin = guc_virtual_context_pin,
-	.unpin = guc_context_unpin,
+	.unpin = guc_virtual_context_unpin,
 	.post_unpin = guc_context_post_unpin,
 
 	.ban = guc_context_ban,
@@ -123,6 +123,12 @@ enum {
 	__INTEL_WAKEREF_PUT_LAST_BIT__
 };
 
+static inline void
+intel_wakeref_might_get(struct intel_wakeref *wf)
+{
+	might_lock(&wf->mutex);
+}
+
 /**
  * intel_wakeref_put_flags: Release the wakeref
  * @wf: the wakeref
@@ -170,6 +176,12 @@ intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
 			   FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
 }
 
+static inline void
+intel_wakeref_might_put(struct intel_wakeref *wf)
+{
+	might_lock(&wf->mutex);
+}
+
 /**
  * intel_wakeref_lock: Lock the wakeref (mutex)
  * @wf: the wakeref