Commit 1a9c4db4 authored by Michał Winiarski, committed by Matt Roper

drm/i915/gem: Use to_gt() helper

Use to_gt() helper consistently throughout the codebase.
Pure mechanical s/i915->gt/to_gt(i915)/. No functional changes.
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211214193346.21231-6-andi.shyti@linux.intel.com
parent c14adcbd
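For reference, the to_gt() helper used throughout this patch was introduced in the parent commit (c14adcbd). At this point in the series it is a trivial accessor for the single GT embedded in struct drm_i915_private; a minimal sketch of its shape:

static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
{
	/* Currently just returns the embedded GT; callers no longer need
	 * to know how the GT is reached, so this can change later without
	 * touching the call sites converted below.
	 */
	return &i915->gt;
}

The generated code is therefore unchanged; only the call sites are decoupled from the struct layout.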
@@ -237,7 +237,7 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
 	 * colateral damage, and we should not pretend we can by
 	 * exposing the interface.
 	 */
-	if (!intel_has_reset_engine(&i915->gt))
+	if (!intel_has_reset_engine(to_gt(i915)))
 		return -ENODEV;
 	pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
@@ -254,7 +254,7 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
 	if (!protected) {
 		pc->uses_protected_content = false;
-	} else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
+	} else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
 		ret = -ENODEV;
 	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
 		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
@@ -268,8 +268,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
 		 */
 		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-		if (!intel_pxp_is_active(&i915->gt.pxp))
-			ret = intel_pxp_start(&i915->gt.pxp);
+		if (!intel_pxp_is_active(&to_gt(i915)->pxp))
+			ret = intel_pxp_start(&to_gt(i915)->pxp);
 	}
 	return ret;
@@ -571,7 +571,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
 	intel_engine_mask_t prev_mask;
 	/* FIXME: This is NIY for execlists */
-	if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+	if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc)))
 		return -ENODEV;
 	if (get_user(slot, &ext->engine_index))
@@ -833,7 +833,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
 		sseu = &pc->legacy_rcs_sseu;
 	}
-	ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
+	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
 	if (ret)
 		return ret;
@@ -1044,7 +1044,7 @@ static struct i915_gem_engines *alloc_engines(unsigned int count)
 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
 						struct intel_sseu rcs_sseu)
 {
-	const struct intel_gt *gt = &ctx->i915->gt;
+	const struct intel_gt *gt = to_gt(ctx->i915);
 	struct intel_engine_cs *engine;
 	struct i915_gem_engines *e, *err;
 	enum intel_engine_id id;
@@ -1521,7 +1521,7 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
 		 * colateral damage, and we should not pretend we can by
 		 * exposing the interface.
 		 */
-		if (!intel_has_reset_engine(&ctx->i915->gt))
+		if (!intel_has_reset_engine(to_gt(ctx->i915)))
 			return -ENODEV;
 		i915_gem_context_clear_persistence(ctx);
@@ -1559,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
 	} else if (HAS_FULL_PPGTT(i915)) {
 		struct i915_ppgtt *ppgtt;
-		ppgtt = i915_ppgtt_create(&i915->gt, 0);
+		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
 		if (IS_ERR(ppgtt)) {
 			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
 				PTR_ERR(ppgtt));
@@ -1742,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 	if (args->flags)
 		return -EINVAL;
-	ppgtt = i915_ppgtt_create(&i915->gt, 0);
+	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
 	if (IS_ERR(ppgtt))
 		return PTR_ERR(ppgtt);
@@ -2194,7 +2194,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
 		return -EINVAL;
-	ret = intel_gt_terminally_wedged(&i915->gt);
+	ret = intel_gt_terminally_wedged(to_gt(i915));
 	if (ret)
 		return ret;
...
@@ -379,7 +379,7 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
 	if (ext.flags)
 		return -EINVAL;
-	if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
+	if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
 		return -ENODEV;
 	ext_data->flags |= I915_BO_PROTECTED;
...
@@ -2361,9 +2361,9 @@ static int eb_submit(struct i915_execbuffer *eb)
 	return err;
 }
-static int num_vcs_engines(const struct drm_i915_private *i915)
+static int num_vcs_engines(struct drm_i915_private *i915)
 {
-	return hweight_long(VDBOX_MASK(&i915->gt));
+	return hweight_long(VDBOX_MASK(to_gt(i915)));
 }
 /*
...
@@ -645,7 +645,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 		goto insert;
 	/* Attempt to reap some mmap space from dead objects */
-	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
					       NULL);
 	if (err)
 		goto err;
...
@@ -19,6 +19,7 @@
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
 	struct address_space *mapping = obj->base.filp->f_mapping;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct scatterlist *sg;
 	struct sg_table *st;
 	dma_addr_t dma;
@@ -73,7 +74,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 		dst += PAGE_SIZE;
 	}
-	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+	intel_gt_chipset_flush(to_gt(i915));
 	/* We're no longer struct page backed */
 	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
@@ -140,6 +141,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
 {
 	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	int err;
 	err = i915_gem_object_wait(obj,
@@ -159,7 +161,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
 		return -EFAULT;
 	drm_clflush_virt_range(vaddr, args->size);
-	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+	intel_gt_chipset_flush(to_gt(i915));
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 	return 0;
...
@@ -35,7 +35,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	intel_gt_suspend_prepare(&i915->gt);
+	intel_gt_suspend_prepare(to_gt(i915));
 	i915_gem_drain_freed_objects(i915);
 }
@@ -153,7 +153,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 	 * machine in an unusable condition.
 	 */
-	intel_gt_suspend_late(&i915->gt);
+	intel_gt_suspend_late(to_gt(i915));
 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
 	for (phase = phases; *phase; phase++) {
@@ -223,7 +223,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	intel_gt_resume(&i915->gt);
+	intel_gt_resume(to_gt(i915));
 	ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
 	GEM_WARN_ON(ret);
...
@@ -153,7 +153,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 */
 	if (shrink & I915_SHRINK_ACTIVE)
 		/* Retire requests to unpin all idle contexts */
-		intel_gt_retire_requests(&i915->gt);
+		intel_gt_retire_requests(to_gt(i915));
 	/*
 	 * As we may completely rewrite the (un)bound list whilst unbinding
...
@@ -38,12 +38,13 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 {
 	const unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_private *i915 = to_i915(dev);
 	struct i915_gem_context *ctx;
 	unsigned long idx;
 	long ret;
 	/* ABI: return -EIO if already wedged */
-	ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
+	ret = intel_gt_terminally_wedged(to_gt(i915));
 	if (ret)
 		return ret;
...
@@ -397,7 +397,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
 	enum i915_cache_level src_level, dst_level;
 	int ret;
-	if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
+	if (!to_gt(i915)->migrate.context || intel_gt_is_wedged(to_gt(i915)))
 		return ERR_PTR(-EINVAL);
 	/* With fail_gpu_migration, we always perform a GPU clear. */
@@ -410,8 +410,8 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
 		    !I915_SELFTEST_ONLY(fail_gpu_migration))
 			return ERR_PTR(-EINVAL);
-		intel_engine_pm_get(i915->gt.migrate.context->engine);
-		ret = intel_context_migrate_clear(i915->gt.migrate.context, dep,
+		intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+		ret = intel_context_migrate_clear(to_gt(i915)->migrate.context, dep,
						  dst_st->sgl, dst_level,
						  i915_ttm_gtt_binds_lmem(dst_mem),
						  0, &rq);
@@ -423,8 +423,8 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
			return ERR_CAST(src_rsgt);
 		src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
-		intel_engine_pm_get(i915->gt.migrate.context->engine);
-		ret = intel_context_migrate_copy(i915->gt.migrate.context,
+		intel_engine_pm_get(to_gt(i915)->migrate.context->engine);
+		ret = intel_context_migrate_copy(to_gt(i915)->migrate.context,
						 dep, src_rsgt->table.sgl,
						 src_level,
						 i915_ttm_gtt_binds_lmem(bo->resource),
@@ -435,7 +435,7 @@ static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo,
 		i915_refct_sgt_put(src_rsgt);
 	}
-	intel_engine_pm_put(i915->gt.migrate.context->engine);
+	intel_engine_pm_put(to_gt(i915)->migrate.context->engine);
 	if (ret && rq) {
 		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
...
@@ -529,7 +529,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		 * On almost all of the older hw, we cannot tell the GPU that
 		 * a page is readonly.
 		 */
-		if (!dev_priv->gt.vm->has_read_only)
+		if (!to_gt(dev_priv)->vm->has_read_only)
			return -ENODEV;
 	}
...
@@ -1705,7 +1705,7 @@ int i915_gem_huge_page_mock_selftests(void)
 	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
 	mkwrite_device_info(dev_priv)->ppgtt_size = 48;
-	ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
+	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
 		goto out_unlock;
@@ -1747,7 +1747,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		return 0;
 	}
-	if (intel_gt_is_wedged(&i915->gt))
+	if (intel_gt_is_wedged(to_gt(i915)))
 		return 0;
 	return i915_live_subtests(tests, i915);
...
@@ -592,7 +592,7 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_client_tiled_blits),
 	};
-	if (intel_gt_is_wedged(&i915->gt))
+	if (intel_gt_is_wedged(to_gt(i915)))
 		return 0;
 	return i915_live_subtests(tests, i915);
...
@@ -90,7 +90,7 @@ static int live_nop_switch(void *arg)
 		}
 		if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
 			pr_err("Failed to populated %d contexts\n", nctx);
-			intel_gt_set_wedged(&i915->gt);
+			intel_gt_set_wedged(to_gt(i915));
 			i915_request_put(rq);
 			err = -EIO;
 			goto out_file;
@@ -146,7 +146,7 @@ static int live_nop_switch(void *arg)
 			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
 				pr_err("Switching between %ld contexts timed out\n",
 				       prime);
-				intel_gt_set_wedged(&i915->gt);
+				intel_gt_set_wedged(to_gt(i915));
 				i915_request_put(rq);
 				break;
 			}
@@ -1223,7 +1223,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 		return 0;
 	if (flags & TEST_RESET)
-		igt_global_reset_lock(&i915->gt);
+		igt_global_reset_lock(to_gt(i915));
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
@@ -1306,7 +1306,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 out_unlock:
 	if (flags & TEST_RESET)
-		igt_global_reset_unlock(&i915->gt);
+		igt_global_reset_unlock(to_gt(i915));
 	if (ret)
 		pr_err("%s: Failed with %d!\n", name, ret);
@@ -1877,7 +1877,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_vm_isolation),
 	};
-	if (intel_gt_is_wedged(&i915->gt))
+	if (intel_gt_is_wedged(to_gt(i915)))
 		return 0;
 	return i915_live_subtests(tests, i915);
...
@@ -261,5 +261,5 @@ int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
 	if (!HAS_LMEM(i915))
 		return 0;
-	return intel_gt_live_subtests(tests, &i915->gt);
+	return intel_gt_live_subtests(tests, to_gt(i915));
 }
@@ -84,6 +84,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 struct rnd_state *prng)
 {
 	const unsigned long npages = obj->base.size / PAGE_SIZE;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_ggtt_view view;
 	struct i915_vma *vma;
 	unsigned long page;
@@ -141,7 +142,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 	if (offset >= obj->base.size)
 		goto out;
-	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+	intel_gt_flush_ggtt_writes(to_gt(i915));
 	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 	cpu = kmap(p) + offset_in_page(offset);
@@ -175,6 +176,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 {
 	const unsigned int nreal = obj->scratch / PAGE_SIZE;
 	const unsigned long npages = obj->base.size / PAGE_SIZE;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 	unsigned long page;
 	int err;
@@ -234,7 +236,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
 		if (offset >= obj->base.size)
			continue;
-		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+		intel_gt_flush_ggtt_writes(to_gt(i915));
 		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 		cpu = kmap(p) + offset_in_page(offset);
@@ -616,14 +618,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
 static void disable_retire_worker(struct drm_i915_private *i915)
 {
 	i915_gem_driver_unregister__shrinker(i915);
-	intel_gt_pm_get(&i915->gt);
-	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
+	intel_gt_pm_get(to_gt(i915));
+	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
 }
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
 	igt_flush_test(i915);
-	intel_gt_pm_put(&i915->gt);
+	intel_gt_pm_put(to_gt(i915));
 	i915_gem_driver_register__shrinker(i915);
 }
@@ -651,8 +653,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	/* Disable background reaper */
 	disable_retire_worker(i915);
-	GEM_BUG_ON(!i915->gt.awake);
-	intel_gt_retire_requests(&i915->gt);
+	GEM_BUG_ON(!to_gt(i915)->awake);
+	intel_gt_retire_requests(to_gt(i915));
 	i915_gem_drain_freed_objects(i915);
 	/* Trim the device mmap space to only a page */
@@ -728,7 +730,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	/* Now fill with busy dead objects that we expect to reap */
 	for (loop = 0; loop < 3; loop++) {
-		if (intel_gt_is_wedged(&i915->gt))
+		if (intel_gt_is_wedged(to_gt(i915)))
			break;
 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -942,7 +944,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
 	}
 	if (type == I915_MMAP_TYPE_GTT)
-		intel_gt_flush_ggtt_writes(&i915->gt);
+		intel_gt_flush_ggtt_writes(to_gt(i915));
 	err = wc_check(obj);
 	if (err == -ENXIO)
@@ -1049,7 +1051,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
 		goto out_unmap;
 	}
-	intel_gt_flush_ggtt_writes(&i915->gt);
+	intel_gt_flush_ggtt_writes(to_gt(i915));
 	err = access_process_vm(current, addr, &x, sizeof(x), 0);
 	if (err != sizeof(x)) {
@@ -1065,7 +1067,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
 		goto out_unmap;
 	}
-	intel_gt_flush_ggtt_writes(&i915->gt);
+	intel_gt_flush_ggtt_writes(to_gt(i915));
 	err = __get_user(y, ptr);
 	if (err) {
@@ -1165,7 +1167,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 	}
 	if (type == I915_MMAP_TYPE_GTT)
-		intel_gt_flush_ggtt_writes(&i915->gt);
+		intel_gt_flush_ggtt_writes(to_gt(i915));
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
...