Commit c4d52feb authored by Chris Wilson

drm/i915: Move over to intel_context_lookup()

In preparation for an ever-growing number of engines, and so an
ever-increasing static array of HW contexts within the GEM context, move
the array over to an rbtree, allocated upon first use.

Unfortunately, this imposes an rbtree lookup at a few frequent callsites,
but we should be able to mitigate those by moving over to using the HW
context as our primary type and so only incur the lookup on the boundary
with the user GEM context and engines.

v2: Check for no HW context in guc_stage_desc_init
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308132522.21573-4-chris@chris-wilson.co.uk
parent 4dc84b77
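With the static __engine[] array gone, callers must handle the case where a
HW context has not been instantiated yet. The diff below settles on two
helpers: intel_context_lookup(), which never allocates and may return NULL,
and intel_context_instance(), which allocates on first use and may return an
ERR_PTR(). A minimal caller-side sketch of that contract follows; the names
inspect_hw_state() and acquire_hw_state() are illustrative, not part of the
patch:

static int inspect_hw_state(struct i915_gem_context *ctx,
                            struct intel_engine_cs *engine)
{
        /* Read-only path: never allocates. */
        struct intel_context *ce = intel_context_lookup(ctx, engine);

        if (!ce || !ce->state)  /* engine never used by this context */
                return 0;

        /* ... read ce->state, ce->ring, ce->sseu ... */
        return 0;
}

static int acquire_hw_state(struct i915_gem_context *ctx,
                            struct intel_engine_cs *engine)
{
        /* Allocating path: creates the HW context on first use. */
        struct intel_context *ce = intel_context_instance(ctx, engine);

        if (IS_ERR(ce))         /* -ENOMEM from intel_context_alloc() */
                return PTR_ERR(ce);

        /* ... pin and use ce ... */
        return 0;
}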
@@ -94,6 +94,7 @@ i915-y += \
 	i915_trace_points.o \
 	i915_vma.o \
 	intel_breadcrumbs.o \
+	intel_context.o \
 	intel_engine_cs.o \
 	intel_hangcheck.o \
 	intel_lrc.o \
...
@@ -494,7 +494,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 		 * itself.
 		 */
 		if (mmio->in_context &&
-		    !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+		    !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+							     dev_priv->engine[ring_id])))
 			continue;

 		if (mmio->mask)
...
@@ -4655,15 +4655,20 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	}

 	for_each_engine(engine, i915, id) {
+		struct intel_context *ce;
 		struct i915_vma *state;
 		void *vaddr;

-		GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
+		ce = intel_context_lookup(ctx, engine);
+		if (!ce)
+			continue;

-		state = to_intel_context(ctx, engine)->state;
+		state = ce->state;
 		if (!state)
 			continue;

+		GEM_BUG_ON(ce->pin_count);
+
 		/*
 		 * As we will hold a reference to the logical state, it will
 		 * not be torn down with the context, and importantly the
...
@@ -95,7 +95,7 @@
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1

-static struct i915_global_context {
+static struct i915_global_gem_context {
 	struct i915_global base;
 	struct kmem_cache *slab_luts;
 } global;
@@ -222,7 +222,7 @@ static void release_hw_id(struct i915_gem_context *ctx)
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-	unsigned int n;
+	struct intel_context *it, *n;

 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
@@ -231,12 +231,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	release_hw_id(ctx);
 	i915_ppgtt_put(ctx->ppgtt);

-	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
-		struct intel_context *ce = &ctx->__engine[n];
-
-		if (ce->ops)
-			ce->ops->destroy(ce);
-	}
+	rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
+		it->ops->destroy(it);

 	kfree(ctx->name);
 	put_pid(ctx->pid);
@@ -340,40 +336,11 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
 	return desc;
 }

-static void intel_context_retire(struct i915_active_request *active,
-				 struct i915_request *rq)
-{
-	struct intel_context *ce =
-		container_of(active, typeof(*ce), active_tracker);
-
-	intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
-		   struct i915_gem_context *ctx,
-		   struct intel_engine_cs *engine)
-{
-	ce->gem_context = ctx;
-	ce->engine = engine;
-	ce->ops = engine->cops;
-
-	INIT_LIST_HEAD(&ce->signal_link);
-	INIT_LIST_HEAD(&ce->signals);
-
-	/* Use the whole device by default */
-	ce->sseu = intel_device_default_sseu(ctx->i915);
-
-	i915_active_request_init(&ce->active_tracker,
-				 NULL, intel_context_retire);
-}
-
 static struct i915_gem_context *
 __create_hw_context(struct drm_i915_private *dev_priv,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct i915_gem_context *ctx;
-	unsigned int n;
 	int ret;
 	int i;
@@ -388,8 +355,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	INIT_LIST_HEAD(&ctx->active_engines);
 	mutex_init(&ctx->mutex);

-	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
-		intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
+	ctx->hw_contexts = RB_ROOT;
+	spin_lock_init(&ctx->hw_contexts_lock);

 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
@@ -728,8 +695,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
 		struct intel_ring *ring;
 		struct i915_request *rq;

-		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
-
 		rq = i915_request_alloc(engine, i915->kernel_context);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
@@ -865,13 +830,15 @@ static int get_sseu(struct i915_gem_context *ctx,
 	if (!engine)
 		return -EINVAL;

+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
 	/* Only use for mutex here is to serialize get_param and set_param. */
 	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
 	if (ret)
 		return ret;

-	ce = to_intel_context(ctx, engine);
-
 	user_sseu.slice_mask = ce->sseu.slice_mask;
 	user_sseu.subslice_mask = ce->sseu.subslice_mask;
 	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
@@ -1037,12 +1004,16 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
 				    struct intel_engine_cs *engine,
 				    struct intel_sseu sseu)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
+	struct intel_context *ce;
 	int ret = 0;

 	GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
 	GEM_BUG_ON(engine->id != RCS0);

+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
 	/* Nothing to do if unmodified. */
 	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
 		return 0;
@@ -1375,22 +1346,22 @@ int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
 #include "selftests/i915_gem_context.c"
 #endif

-static void i915_global_context_shrink(void)
+static void i915_global_gem_context_shrink(void)
 {
 	kmem_cache_shrink(global.slab_luts);
 }

-static void i915_global_context_exit(void)
+static void i915_global_gem_context_exit(void)
 {
 	kmem_cache_destroy(global.slab_luts);
 }

-static struct i915_global_context global = { {
-	.shrink = i915_global_context_shrink,
-	.exit = i915_global_context_exit,
+static struct i915_global_gem_context global = { {
+	.shrink = i915_global_gem_context_shrink,
+	.exit = i915_global_gem_context_exit,
 } };

-int __init i915_global_context_init(void)
+int __init i915_global_gem_context_init(void)
 {
 	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
 	if (!global.slab_luts)
...
@@ -31,7 +31,6 @@
 #include "i915_scheduler.h"
 #include "intel_context.h"
 #include "intel_device_info.h"
-#include "intel_ringbuffer.h"

 struct drm_device;
 struct drm_file;
...
@@ -13,10 +13,10 @@
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
+#include <linux/rbtree.h>
 #include <linux/rcupdate.h>
 #include <linux/types.h>

-#include "i915_gem.h" /* I915_NUM_ENGINES */
 #include "i915_scheduler.h"
 #include "intel_context_types.h"
@@ -140,8 +140,9 @@ struct i915_gem_context {
 	struct i915_sched_attr sched;

-	/** engine: per-engine logical HW state */
-	struct intel_context __engine[I915_NUM_ENGINES];
+	/** hw_contexts: per-engine logical HW state */
+	struct rb_root hw_contexts;
+	spinlock_t hw_contexts_lock;

 	/** ring_size: size for allocating the per-engine ring buffer */
 	u32 ring_size;
...
@@ -794,8 +794,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 	 * keeping all of their resources pinned.
 	 */

-	ce = to_intel_context(eb->ctx, eb->engine);
-	if (!ce->ring) /* first use, assume empty! */
+	ce = intel_context_lookup(eb->ctx, eb->engine);
+	if (!ce || !ce->ring) /* first use, assume empty! */
 		return 0;

 	rq = __eb_wait_for_ring(ce->ring);
...
@@ -36,6 +36,7 @@ static void __i915_globals_cleanup(void)
 static __initconst int (* const initfn[])(void) = {
 	i915_global_active_init,
 	i915_global_context_init,
+	i915_global_gem_context_init,
 	i915_global_objects_init,
 	i915_global_request_init,
 	i915_global_scheduler_init,
...
@@ -26,6 +26,7 @@ void i915_globals_exit(void);
 /* constructors */
 int i915_global_active_init(void);
 int i915_global_context_init(void);
+int i915_global_gem_context_init(void);
 int i915_global_objects_init(void);
 int i915_global_request_init(void);
 int i915_global_scheduler_init(void);
...
@@ -1740,11 +1740,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 	/* Update all contexts now that we've stalled the submission. */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		struct intel_context *ce = to_intel_context(ctx, engine);
+		struct intel_context *ce = intel_context_lookup(ctx, engine);
 		u32 *regs;

 		/* OA settings will be set upon first use */
-		if (!ce->state)
+		if (!ce || !ce->state)
 			continue;

 		regs = i915_gem_object_pin_map(ce->state->obj, map_type);
...
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
#include "intel_context.h"
#include "intel_ringbuffer.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_lookup(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce = NULL;
	struct rb_node *p;

	spin_lock(&ctx->hw_contexts_lock);
	p = ctx->hw_contexts.rb_node;
	while (p) {
		struct intel_context *this =
			rb_entry(p, struct intel_context, node);

		if (this->engine == engine) {
			GEM_BUG_ON(this->gem_context != ctx);
			ce = this;
			break;
		}

		if (this->engine < engine)
			p = p->rb_right;
		else
			p = p->rb_left;
	}
	spin_unlock(&ctx->hw_contexts_lock);

	return ce;
}

struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       struct intel_context *ce)
{
	struct rb_node **p, *parent;
	int err = 0;

	spin_lock(&ctx->hw_contexts_lock);

	parent = NULL;
	p = &ctx->hw_contexts.rb_node;
	while (*p) {
		struct intel_context *this;

		parent = *p;
		this = rb_entry(parent, struct intel_context, node);

		if (this->engine == engine) {
			err = -EEXIST;
			ce = this;
			break;
		}

		if (this->engine < engine)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	if (!err) {
		rb_link_node(&ce->node, parent, p);
		rb_insert_color(&ce->node, &ctx->hw_contexts);
	}

	spin_unlock(&ctx->hw_contexts_lock);

	return ce;
}

void __intel_context_remove(struct intel_context *ce)
{
	struct i915_gem_context *ctx = ce->gem_context;

	spin_lock(&ctx->hw_contexts_lock);
	rb_erase(&ce->node, &ctx->hw_contexts);
	spin_unlock(&ctx->hw_contexts_lock);
}

struct intel_context *
intel_context_instance(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine)
{
	struct intel_context *ce, *pos;

	ce = intel_context_lookup(ctx, engine);
	if (likely(ce))
		return ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);

	pos = __intel_context_insert(ctx, engine, ce);
	if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
		intel_context_free(ce);

	GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
	return pos;
}

static void intel_context_retire(struct i915_active_request *active,
				 struct i915_request *rq)
{
	struct intel_context *ce =
		container_of(active, typeof(*ce), active_tracker);

	intel_context_unpin(ce);
}

void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	ce->gem_context = ctx;
	ce->engine = engine;
	ce->ops = engine->cops;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	/* Use the whole device by default */
	ce->sseu = intel_device_default_sseu(ctx->i915);

	i915_active_request_init(&ce->active_tracker,
				 NULL, intel_context_retire);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
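Note the two costs in intel_context_instance() above: an rbtree walk under
hw_contexts_lock and a possible GFP_KERNEL allocation. This is the lookup
overhead the commit message proposes to mitigate by treating the HW context
as the primary type: resolve it once at the GEM boundary and pass the
pointer around afterwards, as logical_ring_init() does later in this patch
with execlists->preempt_context. A hedged sketch of that caching pattern;
my_cache, my_init() and my_hot_path() are illustrative names, not part of
the patch:

struct my_cache {
        struct intel_context *ce;
};

static int my_init(struct my_cache *cache,
                   struct i915_gem_context *ctx,
                   struct intel_engine_cs *engine)
{
        struct intel_context *ce;

        /* Slow path, once: may allocate and walks the rbtree. */
        ce = intel_context_instance(ctx, engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);

        /* The node stays resident until the GEM context is destroyed. */
        cache->ce = ce;
        return 0;
}

static void my_hot_path(struct my_cache *cache)
{
        struct intel_context *ce = cache->ce;   /* no lookup, no lock */

        /* ... submit using ce directly ... */
}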
@@ -7,20 +7,46 @@
 #ifndef __INTEL_CONTEXT_H__
 #define __INTEL_CONTEXT_H__

+#include "i915_gem_context_types.h"
 #include "intel_context_types.h"
 #include "intel_engine_types.h"

+struct intel_context *intel_context_alloc(void);
+void intel_context_free(struct intel_context *ce);
+
 void intel_context_init(struct intel_context *ce,
 			struct i915_gem_context *ctx,
 			struct intel_engine_cs *engine);

-static inline struct intel_context *
-to_intel_context(struct i915_gem_context *ctx,
-		 const struct intel_engine_cs *engine)
-{
-	return &ctx->__engine[engine->id];
-}
+/**
+ * intel_context_lookup - Find the matching HW context for this (ctx, engine)
+ * @ctx - the parent GEM context
+ * @engine - the target HW engine
+ *
+ * May return NULL if the HW context hasn't been instantiated (i.e. unused).
+ */
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+		     struct intel_engine_cs *engine);
+
+/**
+ * intel_context_instance - Lookup or allocate the HW context for (ctx, engine)
+ * @ctx - the parent GEM context
+ * @engine - the target HW engine
+ *
+ * Returns the existing HW context for this pair of (GEM context, engine), or
+ * allocates and initialises a fresh context. Once allocated, the HW context
+ * remains resident until the GEM context is destroyed.
+ */
+struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine);
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+		       struct intel_engine_cs *engine,
+		       struct intel_context *ce);
+void
+__intel_context_remove(struct intel_context *ce);
+
 static inline struct intel_context *
 intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
...
@@ -8,6 +8,7 @@
 #define __INTEL_CONTEXT_TYPES__

 #include <linux/list.h>
+#include <linux/rbtree.h>
 #include <linux/types.h>

 #include "i915_active_types.h"
@@ -52,6 +53,7 @@ struct intel_context {
 	struct i915_active_request active_tracker;

 	const struct intel_context_ops *ops;
+	struct rb_node node;

 	/** sseu: Control eu/slice partitioning */
 	struct intel_sseu sseu;
...
@@ -644,7 +644,7 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
 static void __intel_context_unpin(struct i915_gem_context *ctx,
 				  struct intel_engine_cs *engine)
 {
-	intel_context_unpin(to_intel_context(ctx, engine));
+	intel_context_unpin(intel_context_lookup(ctx, engine));
 }

 struct measure_breadcrumb {
...
@@ -231,6 +231,11 @@ struct intel_engine_execlists {
 	 */
 	u32 *csb_status;

+	/**
+	 * @preempt_context: the HW context for injecting preempt-to-idle
+	 */
+	struct intel_context *preempt_context;
+
 	/**
 	 * @preempt_complete_status: expected CSB upon completing preemption
 	 */
...
@@ -121,8 +121,8 @@ int intel_guc_ads_create(struct intel_guc *guc)
 	 * to find it. Note that we have to skip our header (1 page),
 	 * because our GuC shared data is there.
 	 */
-	kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
-					  dev_priv->engine[RCS0])->state;
+	kernel_ctx_vma = intel_context_lookup(dev_priv->kernel_context,
+					      dev_priv->engine[RCS0])->state;
 	blob->ads.golden_context_lrca =
 		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
...
@@ -382,7 +382,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
 	desc->db_id = client->doorbell_id;

 	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
-		struct intel_context *ce = to_intel_context(ctx, engine);
+		struct intel_context *ce = intel_context_lookup(ctx, engine);
 		u32 guc_engine_id = engine->guc_id;
 		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
@@ -393,7 +393,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
 		 * for now who owns a GuC client. But for future owner of GuC
 		 * client, need to make sure lrc is pinned prior to enter here.
 		 */
-		if (!ce->state)
+		if (!ce || !ce->state)
 			break;	/* XXX: continue? */

 		/*
@@ -567,7 +567,7 @@ static void inject_preempt_context(struct work_struct *work)
 					     preempt_work[engine->id]);
 	struct intel_guc_client *client = guc->preempt_client;
 	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-	struct intel_context *ce = to_intel_context(client->owner, engine);
+	struct intel_context *ce = intel_context_lookup(client->owner, engine);
 	u32 data[7];

 	if (!ce->ring->emit) { /* recreate upon load/resume */
...
@@ -628,8 +628,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 static void inject_preempt_context(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
-	struct intel_context *ce =
-		to_intel_context(engine->i915->preempt_context, engine);
+	struct intel_context *ce = execlists->preempt_context;
 	unsigned int n;

 	GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -1237,19 +1236,24 @@ static void execlists_submit_request(struct i915_request *request)
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }

-static void execlists_context_destroy(struct intel_context *ce)
+static void __execlists_context_fini(struct intel_context *ce)
 {
-	GEM_BUG_ON(ce->pin_count);
-
-	if (!ce->state)
-		return;
-
 	intel_ring_free(ce->ring);

 	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
 	i915_gem_object_put(ce->state->obj);
 }

+static void execlists_context_destroy(struct intel_context *ce)
+{
+	GEM_BUG_ON(ce->pin_count);
+
+	if (ce->state)
+		__execlists_context_fini(ce);
+
+	intel_context_free(ce);
+}
+
 static void execlists_context_unpin(struct intel_context *ce)
 {
 	struct intel_engine_cs *engine;
@@ -1390,7 +1394,11 @@ static struct intel_context *
 execlists_context_pin(struct intel_engine_cs *engine,
 		      struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
+	struct intel_context *ce;
+
+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;

 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!ctx->ppgtt);
@@ -2441,8 +2449,9 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	execlists->preempt_complete_status = ~0u;
 	if (i915->preempt_context) {
 		struct intel_context *ce =
-			to_intel_context(i915->preempt_context, engine);
+			intel_context_lookup(i915->preempt_context, engine);

+		execlists->preempt_context = ce;
 		execlists->preempt_complete_status =
 			upper_32_bits(ce->lrc_desc);
 	}
...
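Since each intel_context is now individually allocated from slab_ce rather
than embedded in the GEM context, the backend destroy callbacks must also
free the node itself. The patch therefore splits teardown into a
__*_context_fini() helper for the HW state plus a destroy that
unconditionally returns the node to the slab: the execlists version appears
above, and the legacy ring version follows in the next hunk. The shared
shape, as a sketch (my_context_destroy() and my_context_fini() are
illustrative stand-ins for the two backends):

static void my_context_destroy(struct intel_context *ce)
{
        GEM_BUG_ON(ce->pin_count);      /* never destroy a pinned context */

        if (ce->state)                  /* HW state exists only after first pin */
                my_context_fini(ce);    /* release ring and state objects */

        intel_context_free(ce);         /* kmem_cache_free(slab_ce, ce) */
}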
@@ -1349,15 +1349,20 @@ intel_ring_free(struct intel_ring *ring)
 	kfree(ring);
 }

+static void __ring_context_fini(struct intel_context *ce)
+{
+	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+	i915_gem_object_put(ce->state->obj);
+}
+
 static void ring_context_destroy(struct intel_context *ce)
 {
 	GEM_BUG_ON(ce->pin_count);

-	if (!ce->state)
-		return;
-
-	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
-	i915_gem_object_put(ce->state->obj);
+	if (ce->state)
+		__ring_context_fini(ce);
+
+	intel_context_free(ce);
 }

 static int __context_pin_ppgtt(struct i915_gem_context *ctx)
@@ -1552,7 +1557,11 @@ __ring_context_pin(struct intel_engine_cs *engine,
 static struct intel_context *
 ring_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
+	struct intel_context *ce;
+
+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;

 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
@@ -1754,8 +1763,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 		 * placeholder we use to flush other contexts.
 		 */
 		*cs++ = MI_SET_CONTEXT;
-		*cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
-							  engine)->state) |
+		*cs++ = i915_ggtt_offset(intel_context_lookup(i915->kernel_context,
+							      engine)->state) |
 			MI_MM_SPACE_GTT |
 			MI_RESTORE_INHIBIT;
 	}
...
@@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
 	     const char *name)
 {
 	struct i915_gem_context *ctx;
-	unsigned int n;
 	int ret;

 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -41,15 +40,15 @@ mock_context(struct drm_i915_private *i915,
 	INIT_LIST_HEAD(&ctx->link);
 	ctx->i915 = i915;

+	ctx->hw_contexts = RB_ROOT;
+	spin_lock_init(&ctx->hw_contexts_lock);
+
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
 	INIT_LIST_HEAD(&ctx->hw_id_link);
 	INIT_LIST_HEAD(&ctx->active_engines);
 	mutex_init(&ctx->mutex);

-	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
-		intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
-
 	ret = i915_gem_context_pin_hw_id(ctx);
 	if (ret < 0)
 		goto err_handles;
...
@@ -141,9 +141,13 @@ static struct intel_context *
 mock_context_pin(struct intel_engine_cs *engine,
 		 struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
+	struct intel_context *ce;
 	int err = -ENOMEM;

+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;
+
 	if (ce->pin_count++)
 		return ce;
...