Commit 23aae183 authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2019-11-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

One RCU fix and fix for suspend GEM_BUG_ON (with dependencies).
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191107145058.GA17401@jlahtine-desk.ger.corp.intel.com
parents 393fdfdb d9dace94
@@ -11,50 +11,6 @@
 #include "i915_drv.h"
 
-static bool switch_to_kernel_context_sync(struct intel_gt *gt)
-{
-        bool result = !intel_gt_is_wedged(gt);
-
-        if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-                /* XXX hide warning from gem_eio */
-                if (i915_modparams.reset) {
-                        dev_err(gt->i915->drm.dev,
-                                "Failed to idle engines, declaring wedged!\n");
-                        GEM_TRACE_DUMP();
-                }
-
-                /*
-                 * Forcibly cancel outstanding work and leave
-                 * the gpu quiet.
-                 */
-                intel_gt_set_wedged(gt);
-                result = false;
-        }
-
-        if (intel_gt_pm_wait_for_idle(gt))
-                result = false;
-
-        return result;
-}
-
-static void user_forcewake(struct intel_gt *gt, bool suspend)
-{
-        int count = atomic_read(&gt->user_wakeref);
-
-        /* Inside suspend/resume so single threaded, no races to worry about. */
-        if (likely(!count))
-                return;
-
-        intel_gt_pm_get(gt);
-        if (suspend) {
-                GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
-                atomic_sub(count, &gt->wakeref.count);
-        } else {
-                atomic_add(count, &gt->wakeref.count);
-        }
-        intel_gt_pm_put(gt);
-}
-
 void i915_gem_suspend(struct drm_i915_private *i915)
 {
         GEM_TRACE("\n");
@@ -62,8 +18,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
         intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
         flush_workqueue(i915->wq);
 
-        user_forcewake(&i915->gt, true);
-
         /*
          * We have to flush all the executing contexts to main memory so
          * that they can saved in the hibernation image. To ensure the last
@@ -73,8 +27,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
          * state. Fortunately, the kernel_context is disposable and we do
          * not rely on its state.
          */
-        intel_gt_suspend(&i915->gt);
-        intel_uc_suspend(&i915->gt.uc);
+        intel_gt_suspend_prepare(&i915->gt);
 
         i915_gem_drain_freed_objects(i915);
 }
@@ -116,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
          * machine in an unusable condition.
          */
 
+        intel_gt_suspend_late(&i915->gt);
+
         spin_lock_irqsave(&i915->mm.obj_lock, flags);
         for (phase = phases; *phase; phase++) {
                 LIST_HEAD(keep);
@@ -140,8 +95,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
                 list_splice_tail(&keep, *phase);
         }
         spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
-        i915_gem_sanitize(i915);
 }
 
 void i915_gem_resume(struct drm_i915_private *i915)
@@ -161,14 +114,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
         if (intel_gt_resume(&i915->gt))
                 goto err_wedged;
 
-        intel_uc_resume(&i915->gt.uc);
-
-        /* Always reload a context for powersaving. */
-        if (!switch_to_kernel_context_sync(&i915->gt))
-                goto err_wedged;
-
-        user_forcewake(&i915->gt, false);
-
 out_unlock:
         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
         return;
...
@@ -31,9 +31,11 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
         intel_uc_init_early(&gt->uc);
 }
 
-void intel_gt_init_hw_early(struct drm_i915_private *i915)
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
 {
-        i915->gt.ggtt = &i915->ggtt;
+        gt->ggtt = ggtt;
+
+        intel_gt_sanitize(gt, false);
 }
 
 static void init_unused_ring(struct intel_gt *gt, u32 base)
...
@@ -28,7 +28,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
 }
 
 void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void intel_gt_init_hw_early(struct drm_i915_private *i915);
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
 int __must_check intel_gt_init_hw(struct intel_gt *gt);
 int intel_gt_init(struct intel_gt *gt);
 void intel_gt_driver_register(struct intel_gt *gt);
...
@@ -4,6 +4,8 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include <linux/suspend.h>
+
 #include "i915_drv.h"
 #include "i915_globals.h"
 #include "i915_params.h"
@@ -18,6 +20,24 @@
 #include "intel_rps.h"
 #include "intel_wakeref.h"
 
+static void user_forcewake(struct intel_gt *gt, bool suspend)
+{
+        int count = atomic_read(&gt->user_wakeref);
+
+        /* Inside suspend/resume so single threaded, no races to worry about. */
+        if (likely(!count))
+                return;
+
+        intel_gt_pm_get(gt);
+        if (suspend) {
+                GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
+                atomic_sub(count, &gt->wakeref.count);
+        } else {
+                atomic_add(count, &gt->wakeref.count);
+        }
+        intel_gt_pm_put(gt);
+}
+
 static int __gt_unpark(struct intel_wakeref *wf)
 {
         struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -118,8 +138,22 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
 {
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
+        intel_wakeref_t wakeref;
 
-        GEM_TRACE("\n");
+        GEM_TRACE("force:%s\n", yesno(force));
+
+        /* Use a raw wakeref to avoid calling intel_display_power_get early */
+        wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+        /*
+         * As we have just resumed the machine and woken the device up from
+         * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+         * back to defaults, recovering from whatever wedged state we left it
+         * in and so worth trying to use the device once more.
+         */
+        if (intel_gt_is_wedged(gt))
+                intel_gt_unset_wedged(gt);
 
         intel_uc_sanitize(&gt->uc);
@@ -127,6 +161,8 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
                 if (engine->reset.prepare)
                         engine->reset.prepare(engine);
 
+        intel_uc_reset_prepare(&gt->uc);
+
         if (reset_engines(gt) || force) {
                 for_each_engine(engine, gt, id)
                         __intel_engine_reset(engine, false);
@@ -135,6 +171,9 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
         for_each_engine(engine, gt, id)
                 if (engine->reset.finish)
                         engine->reset.finish(engine);
+
+        intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 }
 
 void intel_gt_pm_fini(struct intel_gt *gt)
@@ -148,6 +187,8 @@ int intel_gt_resume(struct intel_gt *gt)
         enum intel_engine_id id;
         int err = 0;
 
+        GEM_TRACE("\n");
+
         /*
          * After resume, we may need to poke into the pinned kernel
         * contexts to paper over any damage caused by the sudden suspend.
@@ -186,14 +227,22 @@ int intel_gt_resume(struct intel_gt *gt)
         }
 
         intel_rc6_enable(&gt->rc6);
+
+        intel_uc_resume(&gt->uc);
+
+        user_forcewake(gt, false);
+
         intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
         intel_gt_pm_put(gt);
 
         return err;
 }
 
-static void wait_for_idle(struct intel_gt *gt)
+static void wait_for_suspend(struct intel_gt *gt)
 {
+        if (!intel_gt_pm_is_awake(gt))
+                return;
+
         if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                 /*
                  * Forcibly cancel outstanding work and leave
@@ -205,27 +254,65 @@ static void wait_for_idle(struct intel_gt *gt)
         intel_gt_pm_wait_for_idle(gt);
 }
 
-void intel_gt_suspend(struct intel_gt *gt)
+void intel_gt_suspend_prepare(struct intel_gt *gt)
+{
+        user_forcewake(gt, true);
+        wait_for_suspend(gt);
+
+        intel_uc_suspend(&gt->uc);
+}
+
+static suspend_state_t pm_suspend_target(void)
+{
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+        return pm_suspend_target_state;
+#else
+        return PM_SUSPEND_TO_IDLE;
+#endif
+}
+
+void intel_gt_suspend_late(struct intel_gt *gt)
 {
         intel_wakeref_t wakeref;
 
         /* We expect to be idle already; but also want to be independent */
-        wait_for_idle(gt);
+        wait_for_suspend(gt);
+
+        /*
+         * On disabling the device, we want to turn off HW access to memory
+         * that we no longer own.
+         *
+         * However, not all suspend-states disable the device. S0 (s2idle)
+         * is effectively runtime-suspend, the device is left powered on
+         * but needs to be put into a low power state. We need to keep
+         * powermanagement enabled, but we also retain system state and so
+         * it remains safe to keep on using our allocated memory.
+         */
+        if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
+                return;
 
         with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
                 intel_rps_disable(&gt->rps);
                 intel_rc6_disable(&gt->rc6);
                 intel_llc_disable(&gt->llc);
         }
+
+        intel_gt_sanitize(gt, false);
+
+        GEM_TRACE("\n");
 }
 
 void intel_gt_runtime_suspend(struct intel_gt *gt)
 {
         intel_uc_runtime_suspend(&gt->uc);
+
+        GEM_TRACE("\n");
 }
 
 int intel_gt_runtime_resume(struct intel_gt *gt)
 {
+        GEM_TRACE("\n");
+
         intel_gt_init_swizzling(gt);
 
         return intel_uc_runtime_resume(&gt->uc);
...
@@ -43,8 +43,9 @@ void intel_gt_pm_fini(struct intel_gt *gt);
 
 void intel_gt_sanitize(struct intel_gt *gt, bool force);
 
+void intel_gt_suspend_prepare(struct intel_gt *gt);
+void intel_gt_suspend_late(struct intel_gt *gt);
 int intel_gt_resume(struct intel_gt *gt);
-void intel_gt_suspend(struct intel_gt *gt);
 
 void intel_gt_runtime_suspend(struct intel_gt *gt);
 int intel_gt_runtime_resume(struct intel_gt *gt);
...
@@ -525,6 +525,11 @@ void intel_rc6_init(struct intel_rc6 *rc6)
 
 void intel_rc6_sanitize(struct intel_rc6 *rc6)
 {
+        if (rc6->enabled) { /* unbalanced suspend/resume */
+                rpm_get(rc6);
+                rc6->enabled = false;
+        }
+
         if (rc6->supported)
                 __intel_rc6_disable(rc6);
 }
...
@@ -15,7 +15,8 @@ static int live_gt_resume(void *arg)
 
         /* Do several suspend/resume cycles to check we don't explode! */
         do {
-                intel_gt_suspend(gt);
+                intel_gt_suspend_prepare(gt);
+                intel_gt_suspend_late(gt);
 
                 if (gt->rc6.enabled) {
                         pr_err("rc6 still enabled after suspend!\n");
...
@@ -603,8 +603,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
         if (ret)
                 goto err_uncore;
 
-        i915_gem_init_mmio(dev_priv);
-
         return 0;
 
 err_uncore:
@@ -1177,7 +1175,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
         if (ret)
                 goto err_ggtt;
 
-        intel_gt_init_hw_early(dev_priv);
+        intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
 
         ret = i915_ggtt_enable_hw(dev_priv);
         if (ret) {
@@ -1821,7 +1819,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
         disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
-        i915_gem_sanitize(dev_priv);
+        intel_gt_sanitize(&dev_priv->gt, true);
 
         ret = i915_ggtt_enable_hw(dev_priv);
         if (ret)
@@ -1952,8 +1950,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
         intel_power_domains_resume(dev_priv);
 
-        intel_gt_sanitize(&dev_priv->gt, true);
-
         enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
         return ret;
...
@@ -1779,7 +1779,6 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
 /* i915_gem.c */
 int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_sanitize(struct drm_i915_private *i915);
 void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
 int i915_gem_freeze(struct drm_i915_private *dev_priv);
@@ -1863,7 +1862,6 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
         return atomic_read(&error->reset_engine_count[engine->uabi_class]);
 }
 
-void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
 void i915_gem_driver_register(struct drm_i915_private *i915);
 void i915_gem_driver_unregister(struct drm_i915_private *i915);
...
@@ -1039,38 +1039,6 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
         return err;
 }
 
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
-        intel_wakeref_t wakeref;
-
-        GEM_TRACE("\n");
-
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-        intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
-        /*
-         * As we have just resumed the machine and woken the device up from
-         * deep PCI sleep (presumably D3_cold), assume the HW has been reset
-         * back to defaults, recovering from whatever wedged state we left it
-         * in and so worth trying to use the device once more.
-         */
-        if (intel_gt_is_wedged(&i915->gt))
-                intel_gt_unset_wedged(&i915->gt);
-
-        /*
-         * If we inherit context state from the BIOS or earlier occupants
-         * of the GPU, the GPU may be in an inconsistent state when we
-         * try to take over. The only way to remove the earlier state
-         * is by resetting. However, resetting on earlier gen is tricky as
-         * it may impact the display and we are uncertain about the stability
-         * of the reset, so this could be applied to even earlier gen.
-         */
-        intel_gt_sanitize(&i915->gt, false);
-
-        intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-}
-
 static int __intel_engines_record_defaults(struct intel_gt *gt)
 {
         struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -1413,11 +1381,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
         WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
 }
 
-void i915_gem_init_mmio(struct drm_i915_private *i915)
-{
-        i915_gem_sanitize(i915);
-}
-
 static void i915_gem_init__mm(struct drm_i915_private *i915)
 {
         spin_lock_init(&i915->mm.obj_lock);
...
@@ -202,21 +202,26 @@ static void kick_submission(struct intel_engine_cs *engine,
         if (prio <= engine->execlists.queue_priority_hint)
                 return;
 
+        rcu_read_lock();
+
         /* Nothing currently active? We're overdue for a submission! */
         inflight = execlists_active(&engine->execlists);
         if (!inflight)
-                return;
+                goto unlock;
 
         /*
          * If we are already the currently executing context, don't
          * bother evaluating if we should preempt ourselves.
          */
         if (inflight->hw_context == rq->hw_context)
-                return;
+                goto unlock;
 
         engine->execlists.queue_priority_hint = prio;
         if (need_preempt(prio, rq_prio(inflight)))
                 tasklet_hi_schedule(&engine->execlists.tasklet);
+
+unlock:
+        rcu_read_unlock();
 }
 
 static void __i915_schedule(struct i915_sched_node *node,
...
@@ -124,7 +124,6 @@ static void pm_resume(struct drm_i915_private *i915)
          */
         with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                 intel_gt_sanitize(&i915->gt, false);
-                i915_gem_sanitize(i915);
 
                 i915_gem_restore_gtt_mappings(i915);
                 i915_gem_restore_fences(&i915->ggtt);
...
@@ -183,7 +183,6 @@ struct drm_i915_private *mock_gem_device(void)
         intel_timelines_init(i915);
 
         mock_init_ggtt(i915, &i915->ggtt);
-        i915->gt.ggtt = &i915->ggtt;
 
         mkwrite_device_info(i915)->engine_mask = BIT(0);
...
@@ -118,8 +118,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
         ggtt->vm.vma_ops.clear_pages = clear_pages;
 
         i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 
-        i915->gt.ggtt = ggtt;
-        intel_gt_init_hw_early(i915);
+        intel_gt_init_hw_early(&i915->gt, ggtt);
 }
 
 void mock_fini_ggtt(struct i915_ggtt *ggtt)
...