Commit 41874148 authored by Colin Xu, committed by Zhenyu Wang

drm/i915/gvt: Adding ppgtt to GVT GEM context after shadow pdps settled.

A Windows guest can't run after force-TDR; the host log shows:
...
gvt: vgpu 1: workload shadow ppgtt isn't ready
gvt: vgpu 1: fail to dispatch workload, skip
...

The error is raised by set_context_ppgtt_from_shadow() when it checks
the shadow_mm and finds it isn't marked as shadowed.

In the workload thread, before each submission, a shadow_mm is marked as shadowed in:
shadow_ppgtt_mm()
<-intel_vgpu_pin_mm()
<-prepare_workload()
<-dispatch_workload()
<-workload_thread()
However, the check of whether shadow_mm is shadowed happens before that:
set_context_ppgtt_from_shadow()
<-dispatch_workload()
<-workload_thread()
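
For illustration, a minimal sketch of the old ordering in dispatch_workload() (simplified pseudocode, not the exact scheduler.c source; error handling elided):

    ret = set_context_ppgtt_from_shadow(workload,
                                        s->shadow[ring_id]->gem_context);
    /* ^ reads mm->ppgtt_mm.shadowed here ... */
    ...
    ret = prepare_workload(workload);
    /* ... but shadowed is only (re)established inside prepare_workload(),
     * via intel_vgpu_pin_mm() -> shadow_ppgtt_mm()
     */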

In the normal case, workload creation checks whether a shadow_mm already
exists: if not, it creates a new one and marks it as shadowed; if so, it
reuses the old one. Since shadow_mm is reused, the shadowed check in
set_context_ppgtt_from_shadow() always sees the state set at creation,
not the state set in intel_vgpu_pin_mm().
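
That create-or-reuse path looks roughly like the following (condensed from intel_vgpu_get_ppgtt_mm() in gtt.c; a sketch with details elided):

    mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
    if (mm) {
            intel_vgpu_mm_get(mm);  /* reuse: the shadowed flag is left as-is */
    } else {
            /* the creation path ends up in shadow_ppgtt_mm(), which sets
             * mm->ppgtt_mm.shadowed = true
             */
            mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
    }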

On force-TDR, all engines are reset. Since the reset is not dmlr level,
all ppgtt_mm are invalidated but not destroyed: invalidation marks every
reused shadow_mm as not shadowed, yet keeps it on ppgtt_mm_list_head.
When such a shadow_mm is reused in the workload submission phase with
shadowed not set, set_context_ppgtt_from_shadow() reports the error.
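
The invalidation path, condensed (based on invalidate_ppgtt_mm() in gtt.c; a sketch, not verbatim):

    static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
    {
            /* tear down the shadow page tables ... */
            ...
            /* ... but the mm itself stays on ppgtt_mm_list_head,
             * ready to be reused with shadowed cleared
             */
            mm->ppgtt_mm.shadowed = false;
    }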

So, set the ppgtt for the GVT GEM context only after shadow_mm is pinned
and the shadow pdps are settled.

v2:
Move set_context_ppgtt_from_shadow() after prepare_workload(). (zhenyu)
v3:
Move set_context_ppgtt_from_shadow() after shadow pdps updated. (zhenyu)

Fixes: 4f15665c ("drm/i915: Add ppgtt to GVT GEM context")
Cc: stable@vger.kernel.org
Signed-off-by: Colin Xu <colin.xu@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent ef5b0b44
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
-static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-					 struct i915_gem_context *ctx)
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+					  struct i915_gem_context *ctx)
 {
 	struct intel_vgpu_mm *mm = workload->shadow_mm;
 	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
 	int i = 0;
 
-	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-		return -EINVAL;
-
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
 		px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
 	} else {
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 			px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
 		}
 	}
-
-	return 0;
 }
 
 static int
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 static int prepare_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	int ring = workload->ring_id;
 	int ret = 0;
 
 	ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		return ret;
 	}
 
+	if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+	    !workload->shadow_mm->ppgtt_mm.shadowed) {
+		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+		return -EINVAL;
+	}
+
 	update_shadow_pdps(workload);
 
+	set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
 		gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct i915_request *rq;
 	int ring_id = workload->ring_id;
 	int ret;
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
-	ret = set_context_ppgtt_from_shadow(workload,
-					    s->shadow[ring_id]->gem_context);
-	if (ret < 0) {
-		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-		goto err_req;
-	}
-
 	ret = intel_gvt_workload_req_alloc(workload);
 	if (ret)
 		goto err_req;