Commit 9a512e23 authored by Colin Xu, committed by Zhenyu Wang

drm/i915/gvt: Use sched_lock to protect gvt scheduler logic.

The scheduler lock (gvt->sched_lock) is used to protect the gvt
scheduler logic, including the gvt scheduler structure (gvt->scheduler)
and the per-vGPU scheduling data (vgpu->sched_data, vgpu->sched_ctl).
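
For reference, below is a minimal sketch of the resulting lock nesting when
both per-vGPU state and scheduler state must be touched. It is illustrative
only and not part of this patch; the helper name is hypothetical, but the
ordering (vgpu_lock taken before sched_lock) matches
complete_current_workload() further down.

	/* Illustrative sketch, not part of this patch. */
	static void example_touch_vgpu_sched_state(struct intel_vgpu *vgpu)
	{
		struct intel_gvt *gvt = vgpu->gvt;

		mutex_lock(&vgpu->vgpu_lock);	/* per-vGPU state first */
		mutex_lock(&gvt->sched_lock);	/* then scheduler-wide state */

		/* gvt->scheduler, vgpu->sched_data and vgpu->sched_ctl
		 * may safely be accessed here.
		 */

		mutex_unlock(&gvt->sched_lock);
		mutex_unlock(&vgpu->vgpu_lock);
	}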

v9:
  - Change commit author since the patches are improved a lot compared
    with original version.
    Original author: Pei Zhang <pei.zhang@intel.com>
  - Rebase to latest gvt-staging.
v8:
  - Correct coding style.
  - Rebase to latest gvt-staging.
v7:
  - Remove gtt_lock since it is already protected by gvt_lock and vgpu_lock.
v6:
  - Rebase to latest gvt-staging.
v5:
  - Rebase to latest gvt-staging.
v4:
  - Rebase to latest gvt-staging.
v3: update to latest code base
Signed-off-by: Pei Zhang <pei.zhang@intel.com>
Signed-off-by: Colin Xu <colin.xu@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent f25a49ab
@@ -376,6 +376,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	idr_init(&gvt->vgpu_idr);
 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
 	mutex_init(&gvt->lock);
+	mutex_init(&gvt->sched_lock);
 	gvt->dev_priv = dev_priv;
 	init_device_info(gvt);
...
@@ -177,6 +177,11 @@ struct intel_vgpu {
 	bool pv_notified;
 	bool failsafe;
 	unsigned int resetting_eng;
+	/* Both sched_data and sched_ctl can be seen as part of the global gvt
+	 * scheduler structure. So below 2 vgpu data are protected
+	 * by sched_lock, not vgpu_lock.
+	 */
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
@@ -299,6 +304,9 @@ struct intel_gvt {
 	 * not yet protected by special locks(vgpu and scheduler lock).
 	 */
 	struct mutex lock;
+	/* scheduler scope lock, protect gvt and vgpu schedule related data */
+	struct mutex sched_lock;
 	struct drm_i915_private *dev_priv;
 	struct idr vgpu_idr;	/* vGPU IDR pool */
...
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
 	ktime_t cur_time;
-	mutex_lock(&gvt->lock);
+	mutex_lock(&gvt->sched_lock);
 	cur_time = ktime_get();
 	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
 	tbs_sched_func(sched_data);
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&gvt->sched_lock);
 }

 static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
 {
+	int ret;
+	mutex_lock(&gvt->sched_lock);
 	gvt->scheduler.sched_ops = &tbs_schedule_ops;
+	ret = gvt->scheduler.sched_ops->init(gvt);
+	mutex_unlock(&gvt->sched_lock);
-	return gvt->scheduler.sched_ops->init(gvt);
+	return ret;
 }

 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
 {
+	mutex_lock(&gvt->sched_lock);
 	gvt->scheduler.sched_ops->clean(gvt);
+	mutex_unlock(&gvt->sched_lock);
 }

+/* For per-vgpu scheduler policy, there are 2 per-vgpu data:
+ * sched_data, and sched_ctl. We see these 2 data as part of
+ * the global scheduler which are protected by gvt->sched_lock.
+ * Caller should make their decision if the vgpu_lock should
+ * be held outside.
+ */
 int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
 {
-	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+	int ret;
+	mutex_lock(&vgpu->gvt->sched_lock);
+	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+	mutex_unlock(&vgpu->gvt->sched_lock);
+	return ret;
 }

 void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 {
+	mutex_lock(&vgpu->gvt->sched_lock);
 	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }

 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	mutex_lock(&vgpu->gvt->sched_lock);
 	if (!vgpu_data->active) {
 		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
 		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
 	}
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }

 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
 {
+	mutex_lock(&gvt->sched_lock);
 	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+	mutex_unlock(&gvt->sched_lock);
 }

 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
+	mutex_lock(&vgpu->gvt->sched_lock);
 	scheduler->sched_ops->stop_schedule(vgpu);
 	if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
+	mutex_unlock(&vgpu->gvt->sched_lock);
 }
@@ -715,7 +715,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload = NULL;
-	mutex_lock(&gvt->lock);
+	mutex_lock(&gvt->sched_lock);
 	/*
 	 * no current vgpu / will be scheduled out / no workload
@@ -761,7 +761,7 @@ static struct intel_vgpu_workload *pick_next_workload(
 	atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
-	mutex_unlock(&gvt->lock);
+	mutex_unlock(&gvt->sched_lock);
 	return workload;
 }

@@ -862,8 +862,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	int event;
-	mutex_lock(&gvt->lock);
 	mutex_lock(&vgpu->vgpu_lock);
+	mutex_lock(&gvt->sched_lock);
 	/* For the workload w/ request, needs to wait for the context
 	 * switch to make sure request is completed.
@@ -941,8 +941,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	if (gvt->scheduler.need_reschedule)
 		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+	mutex_unlock(&gvt->sched_lock);
 	mutex_unlock(&vgpu->vgpu_lock);
-	mutex_unlock(&gvt->lock);
 }

 struct workload_thread_param {
...