Commit e55978a4 authored by Dave Airlie


Merge tag 'drm-intel-next-fixes-2022-10-13' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Fix revocation of non-persistent contexts (Tvrtko Ursulin)
- Handle migration for dpt (Matthew Auld)
- Fix display problems after resume (Thomas Hellström)
- Allow control over the flags when migrating (Matthew Auld)
- Consider DG2_RC_CCS_CC when migrating buffers (Matthew Auld)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Y0gK9QmCmktLLzqp@tursulin-desk
parents d6fe5887 ea19684a
@@ -26,10 +26,17 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
         struct drm_device *dev = fb->dev;
         struct drm_i915_private *dev_priv = to_i915(dev);
         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+        struct i915_gem_ww_ctx ww;
         struct i915_vma *vma;
         u32 alignment;
         int ret;

+        /*
+         * We are not syncing against the binding (and potential migrations)
+         * below, so this vm must never be async.
+         */
+        GEM_WARN_ON(vm->bind_async_flags);
+
         if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
                 return ERR_PTR(-EINVAL);

@@ -37,29 +44,48 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
         atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

-        ret = i915_gem_object_lock_interruptible(obj, NULL);
-        if (!ret) {
-                ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
-                i915_gem_object_unlock(obj);
-        }
-        if (ret) {
-                vma = ERR_PTR(ret);
-                goto err;
-        }
-
-        vma = i915_vma_instance(obj, vm, view);
-        if (IS_ERR(vma))
-                goto err;
-
-        if (i915_vma_misplaced(vma, 0, alignment, 0)) {
-                ret = i915_vma_unbind_unlocked(vma);
-                if (ret) {
-                        vma = ERR_PTR(ret);
-                        goto err;
-                }
-        }
-
-        ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+        for_i915_gem_ww(&ww, ret, true) {
+                ret = i915_gem_object_lock(obj, &ww);
+                if (ret)
+                        continue;
+
+                if (HAS_LMEM(dev_priv)) {
+                        unsigned int flags = obj->flags;
+
+                        /*
+                         * For this type of buffer we need to able to read from the CPU
+                         * the clear color value found in the buffer, hence we need to
+                         * ensure it is always in the mappable part of lmem, if this is
+                         * a small-bar device.
+                         */
+                        if (intel_fb_rc_ccs_cc_plane(fb) >= 0)
+                                flags &= ~I915_BO_ALLOC_GPU_ONLY;
+                        ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0,
+                                                        flags);
+                        if (ret)
+                                continue;
+                }
+
+                ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+                if (ret)
+                        continue;
+
+                vma = i915_vma_instance(obj, vm, view);
+                if (IS_ERR(vma)) {
+                        ret = PTR_ERR(vma);
+                        continue;
+                }
+
+                if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+                        ret = i915_vma_unbind(vma);
+                        if (ret)
+                                continue;
+                }
+
+                ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+                if (ret)
+                        continue;
+        }

         if (ret) {
                 vma = ERR_PTR(ret);
                 goto err;
...
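For readers not familiar with the locking pattern introduced above: for_i915_gem_ww() runs its body as a ww (wait/wound) transaction, and any step that fails with -EDEADLK backs off (drops the object locks) and retries the whole body, which is why every error path in the new code funnels through continue instead of returning early. Below is a minimal stand-alone sketch of that control flow in plain C; lock_step(), migrate_step() and pin_step() are hypothetical stand-ins for i915_gem_object_lock(), __i915_gem_object_migrate() and i915_vma_pin_ww(), and the loop is only a model of the macro's retry behaviour, not the kernel implementation.

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Hypothetical stand-ins for the locking, migration and pinning steps.
 * migrate_step() pretends to hit deadlock avoidance on the first try. */
static int lock_step(void)    { return 0; }
static int migrate_step(void) { return attempts < 2 ? -EDEADLK : 0; }
static int pin_step(void)     { return 0; }

int main(void)
{
        int ret;

        /*
         * Model of for_i915_gem_ww(&ww, ret, true): run the transaction,
         * and if any step reports -EDEADLK, back off and retry the whole
         * thing.  "continue" jumps straight to the retry decision, which
         * is why the diff above routes every failure through continue.
         */
        do {
                attempts++;

                ret = lock_step();
                if (ret)
                        continue;

                ret = migrate_step();
                if (ret)
                        continue;

                ret = pin_step();
        } while (ret == -EDEADLK);

        printf("finished after %d attempt(s), ret = %d\n", attempts, ret);
        return 0;
}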
@@ -1383,14 +1383,8 @@ kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
          */
         for_each_gem_engine(ce, engines, it) {
                 struct intel_engine_cs *engine;
-                bool skip = false;
-
-                if (exit)
-                        skip = intel_context_set_exiting(ce);
-                else if (!persistent)
-                        skip = intel_context_exit_nonpersistent(ce, NULL);

-                if (skip)
+                if ((exit || !persistent) && intel_context_revoke(ce))
                         continue; /* Already marked. */

                 /*
...
@@ -652,6 +652,41 @@ bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
 int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
                             struct i915_gem_ww_ctx *ww,
                             enum intel_region_id id)
+{
+        return __i915_gem_object_migrate(obj, ww, id, obj->flags);
+}
+
+/**
+ * __i915_gem_object_migrate - Migrate an object to the desired region id, with
+ * control of the extra flags
+ * @obj: The object to migrate.
+ * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
+ * not be successful in evicting other objects to make room for this object.
+ * @id: The region id to migrate to.
+ * @flags: The object flags. Normally just obj->flags.
+ *
+ * Attempt to migrate the object to the desired memory region. The
+ * object backend must support migration and the object may not be
+ * pinned, (explicitly pinned pages or pinned vmas). The object must
+ * be locked.
+ * On successful completion, the object will have pages pointing to
+ * memory in the new region, but an async migration task may not have
+ * completed yet, and to accomplish that, i915_gem_object_wait_migration()
+ * must be called.
+ *
+ * Note: the @ww parameter is not used yet, but included to make sure
+ * callers put some effort into obtaining a valid ww ctx if one is
+ * available.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
+ * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
+ * -EBUSY if the object is pinned.
+ */
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+                              struct i915_gem_ww_ctx *ww,
+                              enum intel_region_id id,
+                              unsigned int flags)
 {
         struct drm_i915_private *i915 = to_i915(obj->base.dev);
         struct intel_memory_region *mr;
@@ -672,7 +707,7 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
                 return 0;
         }

-        return obj->ops->migrate(obj, mr);
+        return obj->ops->migrate(obj, mr, flags);
 }

 /**
...
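Read together, the two hunks above make i915_gem_object_migrate() a thin wrapper that passes obj->flags unchanged, while the new __i915_gem_object_migrate() lets the caller override them; that is what intel_pin_fb_obj_dpt() uses to clear I915_BO_ALLOC_GPU_ONLY for framebuffers carrying a clear-color plane. The sketch below is a small user-space model of that wrapper-plus-override shape; the flag value, region enum and function names are illustrative stand-ins, not the kernel API.

#include <stdio.h>

/* Made-up stand-ins for the kernel flag and region id. */
#define BO_ALLOC_GPU_ONLY  (1u << 0)
enum region_id { REGION_SMEM, REGION_LMEM_0 };

struct gem_object { unsigned int flags; enum region_id region; };

/* Model of __i915_gem_object_migrate(): the caller controls the flags. */
static int object_migrate_with_flags(struct gem_object *obj,
                                     enum region_id id, unsigned int flags)
{
        if (flags & BO_ALLOC_GPU_ONLY)
                printf("placing anywhere in region %d (GPU-only allowed)\n", (int)id);
        else
                printf("placing in the CPU-mappable part of region %d\n", (int)id);
        obj->region = id;
        return 0;
}

/* Model of i915_gem_object_migrate(): the old behaviour, obj->flags as-is. */
static int object_migrate(struct gem_object *obj, enum region_id id)
{
        return object_migrate_with_flags(obj, id, obj->flags);
}

int main(void)
{
        struct gem_object fb_obj = { .flags = BO_ALLOC_GPU_ONLY };

        /* Framebuffer with a clear-color plane: the CPU must be able to read
         * it, so drop the GPU-only bit for this one migration only. */
        object_migrate_with_flags(&fb_obj, REGION_LMEM_0,
                                  fb_obj.flags & ~BO_ALLOC_GPU_ONLY);

        /* An ordinary migration keeps whatever the object was created with. */
        object_migrate(&fb_obj, REGION_LMEM_0);
        return 0;
}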
@@ -608,6 +608,10 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
 int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
                             struct i915_gem_ww_ctx *ww,
                             enum intel_region_id id);
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+                              struct i915_gem_ww_ctx *ww,
+                              enum intel_region_id id,
+                              unsigned int flags);
 bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
                                  enum intel_region_id id);
...
@@ -107,7 +107,8 @@ struct drm_i915_gem_object_ops {
          * pinning or for as long as the object lock is held.
          */
         int (*migrate)(struct drm_i915_gem_object *obj,
-                       struct intel_memory_region *mr);
+                       struct intel_memory_region *mr,
+                       unsigned int flags);

         void (*release)(struct drm_i915_gem_object *obj);
...
@@ -848,9 +848,10 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
 }

 static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
-                            struct intel_memory_region *mr)
+                            struct intel_memory_region *mr,
+                            unsigned int flags)
 {
-        return __i915_ttm_migrate(obj, mr, obj->flags);
+        return __i915_ttm_migrate(obj, mr, flags);
 }

 static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
...
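Together with the ops-table change above, this hunk threads the caller-supplied flags through the migrate vtable entry instead of letting the TTM backend silently re-read obj->flags at the bottom of the call chain. The following is a compact, hypothetical model of that plumbing in plain C; the struct and function names only mirror the kernel's shape for readability.

#include <stdio.h>

struct memory_region { const char *name; };
struct gem_object;

/* Hypothetical ops table mirroring the shape of the change:
 * the migrate hook now receives the flags explicitly. */
struct gem_object_ops {
        int (*migrate)(struct gem_object *obj, struct memory_region *mr,
                       unsigned int flags);
};

struct gem_object {
        const struct gem_object_ops *ops;
        unsigned int flags;
};

/* Backend (stands in for the TTM implementation): uses the flags it was given. */
static int backend_migrate(struct gem_object *obj, struct memory_region *mr,
                           unsigned int flags)
{
        printf("migrating to %s with flags 0x%x (obj->flags is 0x%x)\n",
               mr->name, flags, obj->flags);
        return 0;
}

static const struct gem_object_ops ops = { .migrate = backend_migrate };

/* Stands in for the mid-layer: forwards whatever flags the caller chose. */
static int object_migrate(struct gem_object *obj, struct memory_region *mr,
                          unsigned int flags)
{
        return obj->ops->migrate(obj, mr, flags);
}

int main(void)
{
        struct memory_region lmem = { "lmem0" };
        struct gem_object obj = { .ops = &ops, .flags = 0x3 };

        /* The caller can now pass a modified set of flags; the backend no
         * longer falls back to obj->flags on its own. */
        return object_migrate(&obj, &lmem, obj.flags & ~0x1u);
}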
@@ -614,13 +614,12 @@ bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
         return ret;
 }

-bool intel_context_exit_nonpersistent(struct intel_context *ce,
-                                      struct i915_request *rq)
+bool intel_context_revoke(struct intel_context *ce)
 {
         bool ret = intel_context_set_exiting(ce);

         if (ce->ops->revoke)
-                ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+                ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);

         return ret;
 }
...
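The renamed helper drops the request parameter and keeps the useful part of the old behaviour: mark the context as exiting and report, via the test_and_set_bit() return value, whether it had already been marked, so kill_engines() can skip contexts that were revoked earlier. The small user-space model below illustrates only that mark-once/return-previous behaviour (it leaves out the ce->ops->revoke() call that kicks the context off the hardware); the flag bit and the helper are stand-ins, not the kernel atomics.

#include <stdbool.h>
#include <stdio.h>

#define EXITING_BIT 0   /* illustrative stand-in for CONTEXT_EXITING */

struct context { unsigned long flags; };

/* Non-atomic model of test_and_set_bit(): set the bit, return the old value. */
static bool test_and_set_bit_model(int bit, unsigned long *flags)
{
        bool was_set = *flags & (1UL << bit);

        *flags |= 1UL << bit;
        return was_set;
}

/* Model of intel_context_revoke(): returns true if already marked. */
static bool context_revoke(struct context *ce)
{
        return test_and_set_bit_model(EXITING_BIT, &ce->flags);
}

int main(void)
{
        struct context ce = { 0 };
        bool exit = true, persistent = false;

        /* Mirrors the kill_engines() condition from the diff above. */
        if ((exit || !persistent) && context_revoke(&ce))
                printf("already marked, skip\n");
        else
                printf("freshly revoked, proceed with cleanup\n");

        if ((exit || !persistent) && context_revoke(&ce))
                printf("second pass: already marked, skip\n");
        return 0;
}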
@@ -329,8 +329,7 @@ static inline bool intel_context_set_exiting(struct intel_context *ce)
         return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
 }

-bool intel_context_exit_nonpersistent(struct intel_context *ce,
-                                      struct i915_request *rq);
+bool intel_context_revoke(struct intel_context *ce);

 static inline bool
 intel_context_force_single_submission(const struct intel_context *ce)
...
@@ -1275,10 +1275,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
                         atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

                 GEM_BUG_ON(!was_bound);
-                if (!retained_ptes)
+                if (!retained_ptes) {
+                        /*
+                         * Clear the bound flags of the vma resource to allow
+                         * ptes to be repopulated.
+                         */
+                        vma->resource->bound_flags = 0;
                         vma->ops->bind_vma(vm, NULL, vma->resource,
                                            obj ? obj->cache_level : 0,
                                            was_bound);
+                }
                 if (obj) { /* only used during resume => exclusive access */
                         write_domain_objs |= fetch_and_zero(&obj->write_domain);
                         obj->read_domains |= I915_GEM_DOMAIN_GTT;
...
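The resume fix works because the bind implementation consults the vma resource's bound_flags and skips work it believes was already done; after suspend the GGTT PTEs are gone, so stale flags would turn the rebind into a no-op, hence the explicit clear before calling bind_vma(). The sketch below is a loose user-space model of that interaction, assuming a bind that skips already-flagged stages; the flag names and the skip logic are illustrative, not the kernel's exact check.

#include <stdio.h>

#define BIND_GLOBAL (1u << 0)   /* illustrative stand-in, not a kernel value */

struct vma_resource { unsigned int bound_flags; };

/* Model of a bind_vma() hook: only writes PTEs for stages not yet flagged. */
static void bind_vma(struct vma_resource *res, unsigned int wanted)
{
        unsigned int todo = wanted & ~res->bound_flags;

        if (!todo) {
                printf("nothing to do, PTEs assumed present\n");
                return;
        }
        printf("writing PTEs for stages 0x%x\n", todo);
        res->bound_flags |= todo;
}

int main(void)
{
        struct vma_resource res = { 0 };

        bind_vma(&res, BIND_GLOBAL);    /* initial bind writes the PTEs      */
        /* ... suspend: PTE contents are lost, but bound_flags stay set ...  */
        bind_vma(&res, BIND_GLOBAL);    /* stale flags: rebind is skipped    */

        res.bound_flags = 0;            /* the fix: clear before rebinding   */
        bind_vma(&res, BIND_GLOBAL);    /* now the PTEs are repopulated      */
        return 0;
}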
@@ -684,7 +684,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
          * Corner case where requests were sitting in the priority list or a
          * request resubmitted after the context was banned.
          */
-        if (unlikely(intel_context_is_banned(ce))) {
+        if (unlikely(!intel_context_is_schedulable(ce))) {
                 i915_request_put(i915_request_mark_eio(rq));
                 intel_engine_signal_breadcrumbs(ce->engine);
                 return 0;
@@ -870,15 +870,15 @@ static int guc_wq_item_append(struct intel_guc *guc,
                               struct i915_request *rq)
 {
         struct intel_context *ce = request_to_scheduling_context(rq);
-        int ret = 0;
+        int ret;

-        if (likely(!intel_context_is_banned(ce))) {
-                ret = __guc_wq_item_append(rq);
-
-                if (unlikely(ret == -EBUSY)) {
-                        guc->stalled_request = rq;
-                        guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
-                }
+        if (unlikely(!intel_context_is_schedulable(ce)))
+                return 0;
+
+        ret = __guc_wq_item_append(rq);
+        if (unlikely(ret == -EBUSY)) {
+                guc->stalled_request = rq;
+                guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
         }

         return ret;
@@ -897,7 +897,7 @@ static bool multi_lrc_submit(struct i915_request *rq)
          * submitting all the requests generated in parallel.
          */
         return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
-               intel_context_is_banned(ce);
+               !intel_context_is_schedulable(ce);
 }

 static int guc_dequeue_one_context(struct intel_guc *guc)
@@ -966,7 +966,7 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
                 struct intel_context *ce = request_to_scheduling_context(last);

                 if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
-                             !intel_context_is_banned(ce))) {
+                             intel_context_is_schedulable(ce))) {
                         ret = try_context_registration(ce, false);
                         if (unlikely(ret == -EPIPE)) {
                                 goto deadlk;
@@ -1576,7 +1576,7 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
 {
         struct intel_engine_cs *engine = __context_to_physical_engine(ce);

-        if (intel_context_is_banned(ce))
+        if (!intel_context_is_schedulable(ce))
                 return;

         GEM_BUG_ON(!intel_context_is_pinned(ce));
@@ -4424,12 +4424,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
 {
         trace_intel_context_reset(ce);

-        if (likely(!intel_context_is_banned(ce))) {
+        if (likely(intel_context_is_schedulable(ce))) {
                 capture_error_state(guc, ce);
                 guc_context_replay(ce);
         } else {
                 drm_info(&guc_to_gt(guc)->i915->drm,
-                         "Ignoring context reset notification of banned context 0x%04X on %s",
+                         "Ignoring context reset notification of exiting context 0x%04X on %s",
                          ce->guc_id.id, ce->engine->name);
         }
 }
...
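The GuC submission changes above replace checks of intel_context_is_banned() with the broader !intel_context_is_schedulable(), so a context that is merely exiting (for example a revoked non-persistent context) is treated the same way as a banned one: its requests are cancelled or skipped rather than replayed. The sketch below models the presumed relationship between the two predicates with illustrative flag bits; the real definition lives with the other intel_context flag helpers in the i915 headers.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits; the kernel keeps these in ce->flags. */
#define CONTEXT_BANNED_FLAG  (1u << 0)
#define CONTEXT_EXITING_FLAG (1u << 1)

struct context { unsigned int flags; };

static bool is_banned(const struct context *ce)
{
        return ce->flags & CONTEXT_BANNED_FLAG;
}

/* Assumed shape of intel_context_is_schedulable(): neither banned nor in
 * the process of exiting (e.g. a revoked non-persistent context). */
static bool is_schedulable(const struct context *ce)
{
        return !(ce->flags & (CONTEXT_BANNED_FLAG | CONTEXT_EXITING_FLAG));
}

int main(void)
{
        struct context exiting = { .flags = CONTEXT_EXITING_FLAG };

        /* Old check: an exiting-but-not-banned context still looked usable,
         * so the backend would replay or submit its requests. */
        printf("old check would submit: %s\n", !is_banned(&exiting) ? "yes" : "no");

        /* New check: anything non-schedulable is dropped or skipped. */
        printf("new check would submit: %s\n", is_schedulable(&exiting) ? "yes" : "no");
        return 0;
}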