Commit f31d83f0 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2020-03-12' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.6-rc6:
- hard lockup fix
- GVT fixes
- 32-bit alignment issue fix
- timeline wait fixes
- cacheline_retire vs cacheline_free race fix
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87lfo6ksvw.fsf@intel.com
parents d9443265 14a0d527
@@ -423,7 +423,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	if (unlikely(entry->flags & eb->invalid_flags))
 		return -EINVAL;
 
-	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+	if (unlikely(entry->alignment &&
+		     !is_power_of_2_u64(entry->alignment)))
 		return -EINVAL;
 
 	/*
...
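This is the 32-bit alignment fix from the list above: is_power_of_2() in include/linux/log2.h takes an unsigned long, which is only 32 bits wide on 32-bit kernels, so a 64-bit alignment value is silently truncated before the check. The replacement helper is added in the i915_utils.h hunk at the end of this diff. A minimal sketch of the failure mode on a 32-bit kernel (values are illustrative):

	u64 a = 1ULL << 32;		/* a power-of-two alignment */
	u64 b = (1ULL << 32) | 4;	/* not a power of two */

	is_power_of_2(a);	/* unsigned long arg truncates to 0 -> false: valid value rejected */
	is_power_of_2(b);	/* truncates to 4 -> true: bogus value accepted */
	is_power_of_2_u64(a);	/* true: checks the full 64-bit value */
	is_power_of_2_u64(b);	/* false */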
@@ -1679,11 +1679,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	if (!intel_engine_has_timeslices(engine))
 		return false;
 
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return false;
-
-	hint = max(rq_prio(list_next_entry(rq, sched.link)),
-		   engine->execlists.queue_priority_hint);
+	hint = engine->execlists.queue_priority_hint;
+	if (!list_is_last(&rq->sched.link, &engine->active.requests))
+		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
 	return hint >= effective_prio(rq);
 }
@@ -1725,6 +1723,18 @@ static void set_timeslice(struct intel_engine_cs *engine)
 	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
 
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+
+	execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+	if (timer_pending(&execlists->timer))
+		return;
+
+	set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
 static void record_preemption(struct intel_engine_execlists *execlists)
 {
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
@@ -1888,11 +1898,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				if (!execlists->timer.expires &&
-				    need_timeslice(engine, last))
-					set_timer_ms(&execlists->timer,
-						     timeslice(engine));
-
+				start_timeslice(engine);
 				return;
 			}
 		}
@@ -1927,7 +1933,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
-				return; /* leave this for another */
+				start_timeslice(engine);
+				return; /* leave this for another sibling */
 			}
 
 			ENGINE_TRACE(engine,
...
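For readability, here is the new start_timeslice() from the hunks above with editorial comments added; the code is unchanged from the diff, and the comments are our reading of it:

static void start_timeslice(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;

	/* Remember the priority level this slice is being started for. */
	execlists->switch_priority_hint = execlists->queue_priority_hint;

	/* If a slice is already ticking, leave it alone: rearming would
	 * push the expiry back and extend the current context's slice. */
	if (timer_pending(&execlists->timer))
		return;

	set_timer_ms(&execlists->timer, timeslice(engine));
}

Both early-exit paths in execlists_dequeue() now funnel through this one helper, so the timer is armed consistently whether we leave a request behind for a virtual-engine sibling or keep the queue waiting behind ELSP.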
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
 
 static void cacheline_free(struct intel_timeline_cacheline *cl)
 {
+	if (!i915_active_acquire_if_busy(&cl->active)) {
+		__idle_cacheline_free(cl);
+		return;
+	}
+
 	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
 	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
 
-	if (i915_active_is_idle(&cl->active))
-		__idle_cacheline_free(cl);
+	i915_active_release(&cl->active);
 }
 
 int intel_timeline_init(struct intel_timeline *timeline,
...
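This is the cacheline_retire vs cacheline_free race fix: testing i915_active_is_idle() and then freeing is not atomic against the retirement callback running in between. The new version uses an acquire-if-busy handshake; below is the hunk's code again with our annotations (the retire side is described from context, not shown in this diff):

static void cacheline_free(struct intel_timeline_cacheline *cl)
{
	if (!i915_active_acquire_if_busy(&cl->active)) {
		/* Already idle: retirement has completed and cannot run
		 * again, so freeing here cannot race. */
		__idle_cacheline_free(cl);
		return;
	}

	/* Still busy: mark the cacheline for deferred freeing... */
	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);

	/* ...and drop our reference; the retire path observes
	 * CACHELINE_FREE and performs the actual free. */
	i915_active_release(&cl->active);
}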
@@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	/* TODO: add more platforms support */
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv)) {
 		if (connected) {
 			vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
 				SFUSE_STRAP_DDID_DETECTED;
...
@@ -147,15 +147,14 @@ static void virt_vbt_generation(struct vbt *v)
 	/* there's features depending on version! */
 	v->header.version = 155;
 	v->header.header_size = sizeof(v->header);
-	v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+	v->header.vbt_size = sizeof(struct vbt);
 	v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
 	strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
 	v->bdb_header.version = 186; /* child_dev_size = 33 */
 	v->bdb_header.header_size = sizeof(v->bdb_header);
 
-	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
-		- sizeof(struct bdb_header);
+	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
 
 	/* general features */
 	v->general_features_header.id = BDB_GENERAL_FEATURES;
...
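The corrected sizes follow from the virtual VBT layout: vbt_size covers the entire table including the VBT header, and bdb_size covers everything from the BDB header to the end. Schematically (our annotation of the hunk, assuming GVT-g's struct vbt packs the whole table contiguously):

/*
 * struct vbt {
 *	struct vbt_header header;	<-- offset 0
 *	struct bdb_header bdb_header;	<-- header.bdb_offset
 *	...BDB data blocks...
 * };
 *
 * header.vbt_size     = sizeof(struct vbt)
 *	(the whole table, VBT header included)
 * bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
 *	(the BDB header plus all data blocks)
 */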
@@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 
-	mutex_lock(&vgpu->vgpu_lock);
 	WARN(vgpu->active, "vGPU is still active!\n");
 
+	/*
+	 * remove idr first so later clean can judge if need to stop
+	 * service if no active vgpu.
+	 */
+	mutex_lock(&gvt->lock);
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
+	mutex_unlock(&gvt->lock);
+
+	mutex_lock(&vgpu->vgpu_lock);
 	intel_gvt_debugfs_remove_vgpu(vgpu);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_submission(vgpu);
@@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	mutex_unlock(&vgpu->vgpu_lock);
 
 	mutex_lock(&gvt->lock);
-	idr_remove(&gvt->vgpu_idr, vgpu->id);
 	if (idr_is_empty(&gvt->vgpu_idr))
 		intel_gvt_clean_irq(gvt);
 	intel_gvt_update_vgpu_types(gvt);
...
@@ -527,19 +527,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+	struct i915_request *rq =
+		container_of(wrk, typeof(*rq), semaphore_work);
+
+	i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+	i915_request_put(rq);
+}
+
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-	struct i915_request *request =
-		container_of(fence, typeof(*request), semaphore);
+	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
 
 	switch (state) {
 	case FENCE_COMPLETE:
-		i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+		if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+			i915_request_get(rq);
+			init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+			irq_work_queue(&rq->semaphore_work);
+		}
 		break;
 
 	case FENCE_FREE:
-		i915_request_put(request);
+		i915_request_put(rq);
 		break;
 	}
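This is the hard lockup fix from the list above: the semaphore fence can complete from interrupt context with request locks already held, and i915_schedule_bump_priority() takes scheduler locks, so bumping inline risked recursing onto a held lock. The bump is therefore deferred to an irq_work, which runs shortly afterwards outside those locks. A self-contained sketch of the same deferral pattern (only the <linux/irq_work.h> API and container_of() are real; the other names are hypothetical):

#include <linux/irq_work.h>
#include <linux/kernel.h>

struct bump {
	struct irq_work work;	/* embedded, like rq->semaphore_work */
	int prio;
};

/* Stand-in for i915_schedule_bump_priority(): in the real code this
 * takes scheduler locks, which is what made the inline call unsafe. */
static void apply_priority_bump(int prio)
{
}

static void bump_cb(struct irq_work *wrk)
{
	struct bump *b = container_of(wrk, struct bump, work);

	/* Runs later from a self-IPI, after the signaler has dropped
	 * its locks, so taking scheduler locks here is safe. */
	apply_priority_bump(b->prio);
}

/* Called from the context that must not take the locks itself. */
static void defer_bump(struct bump *b, int prio)
{
	b->prio = prio;
	init_irq_work(&b->work, bump_cb);
	irq_work_queue(&b->work);
}

Note the reference counting in the real hunk: the request is pinned with i915_request_get() before queueing and released in the callback, keeping it alive until the deferred bump has run.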
@@ -776,8 +788,8 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 	struct dma_fence *fence;
 	int err;
 
-	GEM_BUG_ON(i915_request_timeline(rq) ==
-		   rcu_access_pointer(signal->timeline));
+	if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+		return 0;
 
 	if (i915_request_started(signal))
 		return 0;
@@ -821,7 +833,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 		return 0;
 
 	err = 0;
-	if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
 		err = i915_sw_fence_await_dma_fence(&rq->submit,
 						    fence, 0,
 						    I915_FENCE_GFP);
@@ -1318,9 +1330,9 @@ void __i915_request_queue(struct i915_request *rq,
 	 * decide whether to preempt the entire chain so that it is ready to
 	 * run at the earliest possible convenience.
 	 */
-	i915_sw_fence_commit(&rq->semaphore);
 	if (attr && rq->engine->schedule)
 		rq->engine->schedule(rq, attr);
+	i915_sw_fence_commit(&rq->semaphore);
 	i915_sw_fence_commit(&rq->submit);
 }
...
@@ -26,6 +26,7 @@
 #define I915_REQUEST_H
 
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 #include <linux/lockdep.h>
 
 #include "gem/i915_gem_context_types.h"
@@ -208,6 +209,7 @@ struct i915_request {
 	};
 	struct list_head execute_cb;
 	struct i915_sw_fence semaphore;
+	struct irq_work semaphore_work;
 
 	/*
 	 * A list of everyone we wait upon, and everyone who waits upon us.
...
@@ -234,6 +234,11 @@ static inline u64 ptr_to_u64(const void *ptr)
 	__idx;								\
 })
 
+static inline bool is_power_of_2_u64(u64 n)
+{
+	return (n != 0 && ((n & (n - 1)) == 0));
+}
+
 static inline void __list_del_many(struct list_head *head,
 				   struct list_head *first)
 {
...