Commit eafbc207 authored by Jani Nikula

Merge tag 'gvt-fixes-2017-07-11' of https://github.com/01org/gvt-linux into drm-intel-next-fixes

gvt-fixes-2017-07-11

- Revert "drm/i915/gvt: Fix possible recursive locking issue" (Chuanxiao),
  which is incomplete fix and it's actually VFIO issue, so revert.
- remove unneeded scheduler mutex for performance fix (Weinan)
- other misc error handling fix and cmd address audit
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170711055333.jhrmvx6ilvg2qlnn@zhen-hp.sh.intel.com
parents 50740024 0cf5ec41
drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2536,6 +2536,11 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	    gma_head == gma_tail)
 		return 0;
 
+	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	ret = ip_gma_set(&s, gma_head);
 	if (ret)
 		goto out;
@@ -2579,6 +2584,11 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
 
+	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	ret = ip_gma_set(&s, gma_head);
 	if (ret)
 		goto out;
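
The cmd address audit in both hunks is the same guard: before the command parser walks a guest-supplied ring buffer, the graphics memory range is validated against the vGPU's GGTT and rejected with -EINVAL otherwise. The stand-alone C sketch below is not the driver's intel_gvt_ggtt_validate_range(); it only illustrates, with made-up aperture/hidden range fields, what such an audit has to establish: a non-wrapping range whose first and last byte both land inside a region assigned to the vGPU.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-vGPU graphics memory layout: one mappable (aperture)
 * range and one non-mappable (hidden) range, as GVT-g partitions the GGTT. */
struct vgpu_gm {
	uint64_t aperture_base, aperture_size;
	uint64_t hidden_base, hidden_size;
};

static bool in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr >= base && addr - base < size;
}

/* Audit a guest-supplied [start, start + size) range before scanning it. */
static bool gm_range_is_valid(const struct vgpu_gm *gm,
			      uint64_t start, uint64_t size)
{
	uint64_t last;

	if (size == 0)
		return true;
	if (start + size < start)	/* reject address wrap-around */
		return false;
	last = start + size - 1;

	return (in_range(start, gm->aperture_base, gm->aperture_size) &&
		in_range(last, gm->aperture_base, gm->aperture_size)) ||
	       (in_range(start, gm->hidden_base, gm->hidden_size) &&
		in_range(last, gm->hidden_base, gm->hidden_size));
}

int main(void)
{
	struct vgpu_gm gm = { 0x00000000, 64 << 20, 0x10000000, 448 << 20 };

	printf("%d\n", gm_range_is_valid(&gm, 0x100000, 0x4000));	/* 1 */
	printf("%d\n", gm_range_is_valid(&gm, 64 << 20, 0x1000));	/* 0 */
	return 0;
}

Doing this before ip_gma_set() means a corrupted or hostile ring descriptor never steers the scanner outside the vGPU's own GGTT window.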
......
drivers/gpu/drm/i915/gvt/gtt.c
@@ -2259,6 +2259,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		ret = setup_spt_oos(gvt);
 		if (ret) {
 			gvt_err("fail to initialize SPT oos\n");
+			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+			__free_page(gvt->gtt.scratch_ggtt_page);
 			return ret;
 		}
 	}
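
The gtt.c hunk closes a leak on an error path: by the time setup_spt_oos() can fail, intel_gvt_init_gtt() has already allocated and DMA-mapped the scratch GGTT page, so both steps must be undone before returning. A compilable userspace analogue of that unwind pattern (malloc/free and stub helpers standing in for page allocation and DMA mapping; all names here are hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the real setup steps; each may fail. */
static void *alloc_scratch(void)      { return malloc(4096); }
static int   dma_map(void *p)         { (void)p; return 0; }	/* 0 = ok */
static void  dma_unmap(void *p)       { (void)p; }
static int   setup_spt_oos_stub(void) { return -1; }		/* force failure */

static int init_gtt(void)
{
	void *scratch;
	int ret;

	scratch = alloc_scratch();
	if (!scratch)
		return -1;

	ret = dma_map(scratch);
	if (ret) {
		free(scratch);
		return ret;
	}

	ret = setup_spt_oos_stub();
	if (ret) {
		fprintf(stderr, "fail to initialize SPT oos\n");
		/* Unwind in reverse order of setup, as the added
		 * dma_unmap_page() + __free_page() pair does. */
		dma_unmap(scratch);
		free(scratch);
		return ret;
	}
	return 0;
}

int main(void)
{
	return init_gtt() ? 1 : 0;
}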
......
drivers/gpu/drm/i915/gvt/gvt.h
@@ -182,9 +182,6 @@ struct intel_vgpu {
 		struct kvm *kvm;
 		struct work_struct release_work;
 		atomic_t released;
-		struct work_struct unpin_work;
-		spinlock_t unpin_lock; /* To protect unpin_list */
-		struct list_head unpin_list;
 	} vdev;
 #endif
 };
......
drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -78,7 +78,6 @@ struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
 	unsigned long iova;
-	struct list_head list;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -167,7 +166,6 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
 	new->gfn = gfn;
 	new->iova = iova;
-	INIT_LIST_HEAD(&new->list);
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -199,52 +197,26 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 	kfree(entry);
 }
 
-static void intel_vgpu_unpin_work(struct work_struct *work)
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 {
-	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
-			vdev.unpin_work);
 	struct device *dev = mdev_dev(vgpu->vdev.mdev);
 	struct gvt_dma *this;
-	unsigned long gfn;
-
-	for (;;) {
-		spin_lock(&vgpu->vdev.unpin_lock);
-		if (list_empty(&vgpu->vdev.unpin_list)) {
-			spin_unlock(&vgpu->vdev.unpin_lock);
-			break;
-		}
-		this = list_first_entry(&vgpu->vdev.unpin_list,
-				struct gvt_dma, list);
-		list_del(&this->list);
-		spin_unlock(&vgpu->vdev.unpin_lock);
-
-		gfn = this->gfn;
-		vfio_unpin_pages(dev, &gfn, 1);
-		kfree(this);
-	}
-}
-
-static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-{
-	struct gvt_dma *this;
+	unsigned long g1;
+	int rc;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	this = __gvt_cache_find(vgpu, gfn);
 	if (!this) {
 		mutex_unlock(&vgpu->vdev.cache_lock);
-		return false;
+		return;
 	}
 
+	g1 = gfn;
 	gvt_dma_unmap_iova(vgpu, this->iova);
-	/* remove this from rb tree */
-	rb_erase(&this->node, &vgpu->vdev.cache);
+	rc = vfio_unpin_pages(dev, &g1, 1);
+	WARN_ON(rc != 1);
+	__gvt_cache_remove_entry(vgpu, this);
 	mutex_unlock(&vgpu->vdev.cache_lock);
-
-	/* put this to the unpin_list */
-	spin_lock(&vgpu->vdev.unpin_lock);
-	list_move_tail(&this->list, &vgpu->vdev.unpin_list);
-	spin_unlock(&vgpu->vdev.unpin_lock);
-	return true;
 }
 
 static void gvt_cache_init(struct intel_vgpu *vgpu)
@@ -485,9 +457,6 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	}
 
 	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
-	INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
-	spin_lock_init(&vgpu->vdev.unpin_lock);
-	INIT_LIST_HEAD(&vgpu->vdev.unpin_list);
 
 	vgpu->vdev.mdev = mdev;
 	mdev_set_drvdata(mdev, vgpu);
@@ -517,7 +486,6 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 	struct intel_vgpu *vgpu = container_of(nb,
 					struct intel_vgpu,
 					vdev.iommu_notifier);
-	bool sched_unmap = false;
 
 	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 		struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -527,10 +495,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
 		end_gfn = gfn + unmap->size / PAGE_SIZE;
 
 		while (gfn < end_gfn)
-			sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
-
-		if (sched_unmap)
-			schedule_work(&vgpu->vdev.unpin_work);
+			gvt_cache_remove(vgpu, gfn++);
 	}
 
 	return NOTIFY_OK;
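
The kvmgt.c hunks above are the bulk of the revert: the deferred-unpin machinery (unpin_work, unpin_lock, unpin_list, gvt_cache_mark_remove) is dropped and gvt_cache_remove() once again unpins the page synchronously under cache_lock. For contrast, the stand-alone pthread sketch below shows the queue-and-drain pattern being removed; names are hypothetical and a userspace mutex stands in for the kernel spinlock, but the shape matches intel_vgpu_unpin_work(): producers only queue entries, and a worker pops one entry at a time under the lock and does the slow unpin outside it.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical deferred-unpin entry; 'gfn' stands in for struct gvt_dma. */
struct entry {
	unsigned long gfn;
	struct entry *next;
};

static pthread_mutex_t unpin_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *unpin_list;	/* protected by unpin_lock */

/* Producer side: what gvt_cache_mark_remove() did - queue, don't unpin. */
static void mark_remove(unsigned long gfn)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	e->gfn = gfn;
	pthread_mutex_lock(&unpin_lock);
	e->next = unpin_list;
	unpin_list = e;
	pthread_mutex_unlock(&unpin_lock);
}

/* Worker side: the drain loop intel_vgpu_unpin_work() used - hold the lock
 * only to pop one entry, then do the slow "unpin" outside it. */
static void *unpin_work(void *arg)
{
	(void)arg;
	for (;;) {
		struct entry *e;

		pthread_mutex_lock(&unpin_lock);
		e = unpin_list;
		if (!e) {
			pthread_mutex_unlock(&unpin_lock);
			break;
		}
		unpin_list = e->next;
		pthread_mutex_unlock(&unpin_lock);

		printf("unpin gfn %lu\n", e->gfn);	/* vfio_unpin_pages() here */
		free(e);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	mark_remove(1);
	mark_remove(2);
	pthread_create(&t, NULL, unpin_work, NULL);
	pthread_join(t, NULL);
	return 0;
}

Per the tag message, this indirection was an incomplete fix for a locking problem that actually lives on the VFIO side, which is why the simpler synchronous path comes back.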
......
drivers/gpu/drm/i915/gvt/scheduler.c
@@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		atomic_set(&workload->shadow_ctx_active, 1);
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
-		/* If the status is -EINPROGRESS means this workload
-		 * doesn't meet any issue during dispatching so when
-		 * get the SCHEDULE_OUT set the status to be zero for
-		 * good. If the status is NOT -EINPROGRESS means there
-		 * is something wrong happened during dispatching and
-		 * the status should not be set to zero
-		 */
-		if (workload->status == -EINPROGRESS)
-			workload->status = 0;
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default:
@@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
+		/* If this request caused GPU hang, req->fence.error will
+		 * be set to -EIO. Use -EIO to set workload status so
+		 * that when this request caused GPU hang, didn't trigger
+		 * context switch interrupt to guest.
+		 */
+		if (likely(workload->status == -EINPROGRESS)) {
+			if (workload->req->fence.error == -EIO)
+				workload->status = -EIO;
+			else
+				workload->status = 0;
+		}
+
 		i915_gem_request_put(fetch_and_zero(&workload->req));
 
 		if (!workload->status && !vgpu->resetting) {
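
With the notifier no longer clearing the status, complete_current_workload() now decides the final workload status at the point where the request's fence error is visible. A minimal stand-alone sketch of that decision (resolve_status() is a made-up name; the driver does this inline in the hunk above):

#include <errno.h>
#include <stdio.h>

/*
 * Mirror of the added logic: a workload still marked -EINPROGRESS dispatched
 * cleanly, so its final status is either 0 or -EIO depending on whether the
 * request's fence reported a GPU hang; any other status means dispatch itself
 * failed and is kept as-is.
 */
static int resolve_status(int workload_status, int fence_error)
{
	if (workload_status != -EINPROGRESS)
		return workload_status;		/* dispatch error, keep it */
	return fence_error == -EIO ? -EIO : 0;	/* propagate GPU hang only */
}

int main(void)
{
	printf("%d\n", resolve_status(-EINPROGRESS, 0));	/* 0 */
	printf("%d\n", resolve_status(-EINPROGRESS, -EIO));	/* -EIO */
	printf("%d\n", resolve_status(-EINVAL, 0));		/* -EINVAL */
	return 0;
}

Leaving the status at -EIO for a hang is what later suppresses the context-switch event injection, since the check that follows only fires for a zero status.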
@@ -464,8 +467,6 @@ struct workload_thread_param {
 	int ring_id;
 };
 
-static DEFINE_MUTEX(scheduler_mutex);
-
 static int workload_thread(void *priv)
 {
 	struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -497,8 +498,6 @@ static int workload_thread(void *priv)
 		if (!workload)
 			break;
 
-		mutex_lock(&scheduler_mutex);
-
 		gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
 			workload->ring_id, workload,
 			workload->vgpu->id);
@@ -537,9 +536,6 @@ static int workload_thread(void *priv)
 					FORCEWAKE_ALL);
 
 		intel_runtime_pm_put(gvt->dev_priv);
-
-		mutex_unlock(&scheduler_mutex);
-
 	}
 	return 0;
 }
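
The last three hunks are the performance fix from the tag message: workload_thread() runs per ring, and the global scheduler_mutex made every ring's dispatch loop take turns even though each thread only handles its own ring_id. The toy pthread program below (hypothetical names, nothing from the driver) just illustrates that shape: per-ring threads touching only their own per-ring state need no cross-ring lock, whereas wrapping each loop body in one shared mutex, as the removed code did, serializes them.

#include <pthread.h>
#include <stdio.h>

#define NUM_RINGS 2

/* Hypothetical per-ring state: each thread owns one ring's counter. */
struct ring {
	int id;
	long dispatched;
};

static void *ring_thread(void *arg)
{
	struct ring *r = arg;
	long i;

	/* With the old global scheduler_mutex this whole loop body ran under
	 * one lock shared by every ring, so the threads took turns; without
	 * it they make progress independently. */
	for (i = 0; i < 1000000; i++)
		r->dispatched++;

	printf("ring %d dispatched %ld workloads\n", r->id, r->dispatched);
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_RINGS];
	struct ring rings[NUM_RINGS];
	int i;

	for (i = 0; i < NUM_RINGS; i++) {
		rings[i].id = i;
		rings[i].dispatched = 0;
		pthread_create(&threads[i], NULL, ring_thread, &rings[i]);
	}
	for (i = 0; i < NUM_RINGS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}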
......