Commit 76444b6e authored by Rodrigo Vivi

Merge tag 'gvt-fixes-2019-03-21' of https://github.com/intel/gvt-linux into drm-intel-fixes

gvt-fixes-2019-03-21

- Fix MI_FLUSH_DW cmd parser on index check (Zhenyu)
- Fix Windows guest font render error (Colin)
- Fix unexpected workload submission for inactive vGPU (Weinan)
- Fix incorrect workload submission in error path (Zhenyu)
- Fix warning for shadow ppgtt mm reclaim list walk with locking (Zhenyu)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190321035018.GF10798@zhen-hp.sh.intel.com
parents 000c4f90 72aabfb8
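
For readers skimming the locking bullet above: the ppgtt_mm_lock hunks below all follow one pattern. Every update of the global shadow ppgtt LRU list is done under a dedicated mutex, and the reclaim path unlinks the victim while holding the lock but drops it before the heavier invalidate_ppgtt_mm() work. The following is a minimal userspace sketch of that pattern, using pthreads and a hand-rolled doubly linked list instead of the kernel's mutex/list_head APIs; all names in it are illustrative, not the GVT code.

#include <pthread.h>
#include <stdio.h>

struct node {
        struct node *prev, *next;
        int pinned;             /* stands in for atomic_read(&mm->pincount) */
};

static struct node lru = { &lru, &lru, 0 };     /* circular list head */
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static void lru_add_tail(struct node *n)
{
        pthread_mutex_lock(&lru_lock);
        n->prev = lru.prev;
        n->next = &lru;
        lru.prev->next = n;
        lru.prev = n;
        pthread_mutex_unlock(&lru_lock);
}

static void heavy_invalidate(struct node *n)
{
        (void)n;        /* placeholder for invalidate_ppgtt_mm(): too slow to run under the lock */
}

static int reclaim_one(void)
{
        struct node *n;

        pthread_mutex_lock(&lru_lock);
        for (n = lru.next; n != &lru; n = n->next) {
                if (n->pinned)
                        continue;
                /* unlink the victim while holding the lock ... */
                n->prev->next = n->next;
                n->next->prev = n->prev;
                n->prev = n->next = n;
                /* ... then drop the lock before the expensive teardown */
                pthread_mutex_unlock(&lru_lock);
                heavy_invalidate(n);
                return 1;
        }
        pthread_mutex_unlock(&lru_lock);
        return 0;
}

int main(void)
{
        struct node a = { &a, &a, 0 }, b = { &b, &b, 1 };

        lru_add_tail(&a);
        lru_add_tail(&b);
        printf("reclaimed: %d\n", reclaim_one());  /* reclaims unpinned 'a', never touches pinned 'b' */
        return 0;
}

Dropping the lock before the teardown mirrors the kernel change: the reclaim walk exits as soon as it has a victim, so the expensive invalidation never runs with the list lock held.
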
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
         }
 
         if (index_mode) {
-                if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+                if (guest_gma >= I915_GTT_PAGE_SIZE) {
                         ret = -EFAULT;
                         goto err;
                 }
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
         }
 
         list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+        mutex_lock(&gvt->gtt.ppgtt_mm_lock);
         list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+        mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
         return mm;
 }
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
                 if (ret)
                         return ret;
 
+                mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
                 list_move_tail(&mm->ppgtt_mm.lru_list,
                                &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
+                mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
         }
 
         return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
         struct intel_vgpu_mm *mm;
         struct list_head *pos, *n;
 
+        mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
         list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
                 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
                         continue;
 
                 list_del_init(&mm->ppgtt_mm.lru_list);
+                mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
                 invalidate_ppgtt_mm(mm);
                 return 1;
         }
+        mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
         return 0;
 }
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                 }
         }
         INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+        mutex_init(&gvt->gtt.ppgtt_mm_lock);
         return 0;
 }
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
         list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
                 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
                 if (mm->type == INTEL_GVT_MM_PPGTT) {
+                        mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                         list_del_init(&mm->ppgtt_mm.lru_list);
+                        mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                         if (mm->ppgtt_mm.shadowed)
                                 invalidate_ppgtt_mm(mm);
                 }
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
         void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
         struct list_head oos_page_use_list_head;
         struct list_head oos_page_free_list_head;
+        struct mutex ppgtt_mm_lock;
         struct list_head ppgtt_mm_lru_list_head;
 
         struct page *scratch_page;
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
         {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
         {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+        {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
         {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
         {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
         int i = 0;
 
         if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-                return -1;
+                return -EINVAL;
 
         if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                 px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
         if (workload->shadow)
                 return 0;
 
-        ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-        if (ret < 0) {
-                gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-                return ret;
-        }
-
         /* pin shadow context by gvt even the shadow context will be pinned
          * when i915 alloc request. That is because gvt will update the guest
          * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
         struct intel_vgpu *vgpu = workload->vgpu;
         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_vgpu_submission *s = &vgpu->submission;
+        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+        struct i915_request *rq;
         int ring_id = workload->ring_id;
         int ret;
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
         mutex_lock(&vgpu->vgpu_lock);
         mutex_lock(&dev_priv->drm.struct_mutex);
 
+        ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+        if (ret < 0) {
+                gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+                goto err_req;
+        }
+
         ret = intel_gvt_workload_req_alloc(workload);
         if (ret)
                 goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
         ret = prepare_workload(workload);
 
 out:
+        if (ret) {
+                /* We might still need to add request with
+                 * clean ctx to retire it properly..
+                 */
+                rq = fetch_and_zero(&workload->req);
+                i915_request_put(rq);
+        }
+
         if (!IS_ERR_OR_NULL(workload->req)) {
                 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                               ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
                 goto out;
         }
 
-        if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+        if (!scheduler->current_vgpu->active ||
+            list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                 goto out;
 
         /*