Commit 8b5342f5 authored by Tvrtko Ursulin

drm/i915: Compartmentalize i915_ggtt_init_hw

Having made a start on better code compartmentalization by introducing
struct intel_gt, continue the theme elsewhere in the code by making
functions take the parameters that logically make the most sense for them
instead of the global struct drm_i915_private.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-19-tvrtko.ursulin@linux.intel.com
parent ee1de7dd
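
The pattern applied below — narrowing a function's parameter from the
device-global struct drm_i915_private down to the object it actually
operates on — can be sketched in isolation. A minimal, hypothetical
illustration (struct widget, struct device_priv, and the helper names are
invented for this sketch and do not exist in the i915 driver):

/* Invented types for illustration only; not from the i915 driver. */
struct widget {
	int state;
};

struct device_priv {
	struct widget widget;
	/* ...many unrelated subsystems... */
};

/* Before: the helper takes the device-global context and digs out the
 * one member it cares about, hiding its real dependency. */
static void widget_reset_wide(struct device_priv *dev_priv)
{
	dev_priv->widget.state = 0;
}

/* After: the helper takes exactly what it logically operates on. */
static void widget_reset(struct widget *w)
{
	w->state = 0;
}

static void device_init(struct device_priv *dev_priv)
{
	widget_reset_wide(dev_priv);     /* old calling convention */
	widget_reset(&dev_priv->widget); /* new; mirrors ggtt_init_hw(&dev_priv->ggtt) */
}

The payoff mirrors ggtt_init_hw() taking struct i915_ggtt in this commit:
the signature documents the real dependency, and the helper can be reused
or tested without constructing the whole device around it.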
@@ -3516,45 +3516,65 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
 	return 0;
 }
 
-/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
- */
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 {
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	int ret;
+	ggtt->vm.cleanup(&ggtt->vm);
+}
 
-	stash_init(&dev_priv->mm.wc_stash);
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+	struct drm_i915_private *i915 = ggtt->vm.i915;
+	int ret = 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
 
-	/* Note that we use page colouring to enforce a guard page at the
-	 * end of the address space. This is required as the CS may prefetch
-	 * beyond the end of the batch buffer, across the page boundary,
-	 * and beyond the end of the GTT if we do not provide a guard.
-	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
 	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
 
 	ggtt->vm.is_ggtt = true;
 
 	/* Only VLV supports read-only GGTT mappings */
-	ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
 
-	if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
+	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
 		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
-				dev_priv->ggtt.gmadr.start,
-				dev_priv->ggtt.mappable_end)) {
+	if (!io_mapping_init_wc(&ggtt->iomap,
+				ggtt->gmadr.start,
+				ggtt->mappable_end)) {
+		ggtt_cleanup_hw(ggtt);
 		ret = -EIO;
-		goto out_gtt_cleanup;
+		goto out;
 	}
 
 	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
 
 	i915_ggtt_init_fences(ggtt);
 
+out:
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return ret;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev_priv: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+{
+	int ret;
+
+	stash_init(&dev_priv->mm.wc_stash);
+
+	/* Note that we use page colouring to enforce a guard page at the
+	 * end of the address space. This is required as the CS may prefetch
+	 * beyond the end of the batch buffer, across the page boundary,
+	 * and beyond the end of the GTT if we do not provide a guard.
+	 */
+	ret = ggtt_init_hw(&dev_priv->ggtt);
+	if (ret)
+		return ret;
+
 	/*
 	 * Initialise stolen early so that we may reserve preallocated
 	 * objects for the BIOS to KMS transition.
@@ -3566,7 +3586,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	return 0;
 
 out_gtt_cleanup:
-	ggtt->vm.cleanup(&ggtt->vm);
+	ggtt_cleanup_hw(&dev_priv->ggtt);
 	return ret;
 }