Commit 5c24c9d2 authored by Michał Winiarski, committed by Matt Roper

drm/i915/gem: Use to_gt() helper for GGTT accesses

GGTT is currently available both through i915->ggtt and gt->ggtt, and we
eventually want to get rid of the i915->ggtt one.
Use to_gt() for all i915->ggtt accesses to help with the future
refactoring.
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Sujaritha Sundaresan <sujaritha.sundaresan@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211219212500.61432-4-andi.shyti@linux.intel.com
parent 204129a2
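
For context, a minimal sketch of the helper this change builds on. This is not part of the patch; it is a hedged reconstruction of how to_gt() was defined by the earlier patch in this same series, assuming the primary GT is still embedded in struct drm_i915_private:

        /* Illustrative only: resolve a device-private pointer to its primary GT. */
        static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
        {
                return &i915->gt;
        }

Note the spelling change this forces throughout the diff: i915->ggtt is an embedded struct (member access with '.', address taken with '&'), while gt->ggtt is a pointer, so '&i915->ggtt.vm' becomes '&to_gt(i915)->ggtt->vm' and '&i915->ggtt' becomes 'to_gt(i915)->ggtt'.
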
@@ -174,7 +174,7 @@ i915_gem_context_get_eb_vm(struct i915_gem_context *ctx)
 
         vm = ctx->vm;
         if (!vm)
-                vm = &ctx->i915->ggtt.vm;
+                vm = &to_gt(ctx->i915)->ggtt->vm;
         vm = i915_vm_get(vm);
 
         return vm;

@@ -1095,7 +1095,7 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
 {
         struct drm_i915_private *i915 =
                 container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
-        return &i915->ggtt;
+        return to_gt(i915)->ggtt;
 }
 
 static void reloc_cache_unmap(struct reloc_cache *cache)

@@ -294,7 +294,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
         struct drm_device *dev = obj->base.dev;
         struct drm_i915_private *i915 = to_i915(dev);
         struct intel_runtime_pm *rpm = &i915->runtime_pm;
-        struct i915_ggtt *ggtt = &i915->ggtt;
+        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
         bool write = area->vm_flags & VM_WRITE;
         struct i915_gem_ww_ctx ww;
         intel_wakeref_t wakeref;
@@ -387,16 +387,16 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
         assert_rpm_wakelock_held(rpm);
 
         /* Mark as being mmapped into userspace for later revocation */
-        mutex_lock(&i915->ggtt.vm.mutex);
+        mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
         if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
-                list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
-        mutex_unlock(&i915->ggtt.vm.mutex);
+                list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
+        mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
 
         /* Track the mmo associated with the fenced vma */
         vma->mmo = mmo;
 
         if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
-                intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
+                intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
                                    msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 
         if (write) {
@@ -511,7 +511,7 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
          * wakeref.
          */
         wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-        mutex_lock(&i915->ggtt.vm.mutex);
+        mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
 
         if (!obj->userfault_count)
                 goto out;
@@ -529,7 +529,7 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
         wmb();
 
 out:
-        mutex_unlock(&i915->ggtt.vm.mutex);
+        mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
@@ -732,13 +732,14 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
                           u32 handle,
                           u64 *offset)
 {
+        struct drm_i915_private *i915 = to_i915(dev);
         enum i915_mmap_type mmap_type;
 
         if (HAS_LMEM(to_i915(dev)))
                 mmap_type = I915_MMAP_TYPE_FIXED;
         else if (pat_enabled())
                 mmap_type = I915_MMAP_TYPE_WC;
-        else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+        else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                 return -ENODEV;
         else
                 mmap_type = I915_MMAP_TYPE_GTT;
@@ -786,7 +787,7 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
 
         switch (args->flags) {
         case I915_MMAP_OFFSET_GTT:
-                if (!i915_ggtt_has_aperture(&i915->ggtt))
+                if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                         return -ENODEV;
                 type = I915_MMAP_TYPE_GTT;
                 break;

@@ -23,7 +23,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 {
         GEM_TRACE("%s\n", dev_name(i915->drm.dev));
 
-        intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
+        intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
         flush_workqueue(i915->wq);
 
         /*

@@ -401,9 +401,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                                 I915_SHRINK_VMAPS);
 
         /* We also want to clear any cached iomaps as they wrap vmap */
-        mutex_lock(&i915->ggtt.vm.mutex);
+        mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
         list_for_each_entry_safe(vma, next,
-                                 &i915->ggtt.vm.bound_list, vm_link) {
+                                 &to_gt(i915)->ggtt->vm.bound_list, vm_link) {
                 unsigned long count = vma->node.size >> PAGE_SHIFT;
                 struct drm_i915_gem_object *obj = vma->obj;
@@ -418,7 +418,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                 i915_gem_object_unlock(obj);
         }
 
-        mutex_unlock(&i915->ggtt.vm.mutex);
+        mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
 
         *(unsigned long *)ptr += freed_pages;
         return NOTIFY_DONE;

@@ -71,7 +71,7 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
 static int i915_adjust_stolen(struct drm_i915_private *i915,
                               struct resource *dsm)
 {
-        struct i915_ggtt *ggtt = &i915->ggtt;
+        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
         struct intel_uncore *uncore = ggtt->vm.gt->uncore;
         struct resource *r;
@@ -582,6 +582,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 
 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
+        struct drm_i915_private *i915 = to_i915(obj->base.dev);
         struct sg_table *pages =
                 i915_pages_create_for_stolen(obj->base.dev,
                                              obj->stolen->start,
@@ -589,7 +590,7 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
         if (IS_ERR(pages))
                 return PTR_ERR(pages);
 
-        dbg_poison(&to_i915(obj->base.dev)->ggtt,
+        dbg_poison(to_gt(i915)->ggtt,
                    sg_dma_address(pages->sgl),
                    sg_dma_len(pages->sgl),
                    POISON_INUSE);
@@ -602,9 +603,10 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 
 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
                                              struct sg_table *pages)
 {
+        struct drm_i915_private *i915 = to_i915(obj->base.dev);
         /* Should only be called from i915_gem_object_release_stolen() */
-        dbg_poison(&to_i915(obj->base.dev)->ggtt,
+        dbg_poison(to_gt(i915)->ggtt,
                    sg_dma_address(pages->sgl),
                    sg_dma_len(pages->sgl),
                    POISON_FREE);

@@ -181,7 +181,8 @@ static int
 i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
                               int tiling_mode, unsigned int stride)
 {
-        struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
+        struct drm_i915_private *i915 = to_i915(obj->base.dev);
+        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
         struct i915_vma *vma, *vn;
         LIST_HEAD(unbind);
         int ret = 0;
@@ -336,7 +337,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
         struct drm_i915_gem_object *obj;
         int err;
 
-        if (!dev_priv->ggtt.num_fences)
+        if (!to_gt(dev_priv)->ggtt->num_fences)
                 return -EOPNOTSUPP;
 
         obj = i915_gem_object_lookup(file, args->handle);
@@ -362,9 +363,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                 args->stride = 0;
         } else {
                 if (args->tiling_mode == I915_TILING_X)
-                        args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
+                        args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
                 else
-                        args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;
+                        args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
 
                 /* Hide bit 17 swizzling from the user. This prevents old Mesa
                  * from aborting the application on sw fallbacks to bit 17,
@@ -419,7 +420,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
         struct drm_i915_gem_object *obj;
         int err = -ENOENT;
 
-        if (!dev_priv->ggtt.num_fences)
+        if (!to_gt(dev_priv)->ggtt->num_fences)
                 return -EOPNOTSUPP;
 
         rcu_read_lock();
@@ -435,10 +436,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 
         switch (args->tiling_mode) {
         case I915_TILING_X:
-                args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
+                args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
                 break;
         case I915_TILING_Y:
-                args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
+                args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
                 break;
         default:
         case I915_TILING_NONE:

@@ -543,7 +543,7 @@ static bool has_bit17_swizzle(int sw)
 
 static bool bad_swizzling(struct drm_i915_private *i915)
 {
-        struct i915_ggtt *ggtt = &i915->ggtt;
+        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 
         if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                 return true;

@@ -1374,7 +1374,7 @@ static int igt_ctx_readonly(void *arg)
                 goto out_file;
         }
 
-        vm = ctx->vm ?: &i915->ggtt.alias->vm;
+        vm = ctx->vm ?: &to_gt(i915)->ggtt->alias->vm;
         if (!vm || !vm->has_read_only) {
                 err = 0;
                 goto out_file;

@@ -307,7 +307,7 @@ static int igt_partial_tiling(void *arg)
         int tiling;
         int err;
 
-        if (!i915_ggtt_has_aperture(&i915->ggtt))
+        if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                 return 0;
 
         /* We want to check the page mapping and fencing of a large object
@@ -320,7 +320,7 @@ static int igt_partial_tiling(void *arg)
 
         obj = huge_gem_object(i915,
                               nreal << PAGE_SHIFT,
-                              (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+                              (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
         if (IS_ERR(obj))
                 return PTR_ERR(obj);
@@ -366,10 +366,10 @@ static int igt_partial_tiling(void *arg)
                 tile.tiling = tiling;
                 switch (tiling) {
                 case I915_TILING_X:
-                        tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+                        tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
                         break;
                 case I915_TILING_Y:
-                        tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+                        tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
                         break;
                 }
@@ -440,7 +440,7 @@ static int igt_smoke_tiling(void *arg)
         IGT_TIMEOUT(end);
         int err;
 
-        if (!i915_ggtt_has_aperture(&i915->ggtt))
+        if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                 return 0;
 
         /*
@@ -457,7 +457,7 @@ static int igt_smoke_tiling(void *arg)
 
         obj = huge_gem_object(i915,
                               nreal << PAGE_SHIFT,
-                              (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+                              (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
         if (IS_ERR(obj))
                 return PTR_ERR(obj);
@@ -486,10 +486,10 @@ static int igt_smoke_tiling(void *arg)
                         break;
                 case I915_TILING_X:
-                        tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+                        tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
                         break;
                 case I915_TILING_Y:
-                        tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+                        tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
                         break;
                 }
@@ -856,6 +856,7 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
+        struct drm_i915_private *i915 = to_i915(obj->base.dev);
         bool no_map;
 
         if (obj->ops->mmap_offset)
@@ -864,7 +865,7 @@ static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
                 return false;
 
         if (type == I915_MMAP_TYPE_GTT &&
-            !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+            !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
                 return false;
 
         i915_gem_object_lock(obj, NULL);

@@ -43,7 +43,7 @@ static int igt_gem_huge(void *arg)
 
         obj = huge_gem_object(i915,
                               nreal * PAGE_SIZE,
-                              i915->ggtt.vm.total + PAGE_SIZE);
+                              to_gt(i915)->ggtt->vm.total + PAGE_SIZE);
         if (IS_ERR(obj))
                 return PTR_ERR(obj);