Commit 5b30694b authored by Chris Wilson

drm/i915: Align GGTT sizes to a fence tile row

Ensure the view occupies the full tile row so that reads/writes into the
VMA do not escape (via fenced detiling) into neighbouring objects - we
will pad the object with scratch pages to satisfy the fence. This
applies the lazy-tiling we employed on gen2/3 to gen4+.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170109161613.11881-2-chris@chris-wilson.co.uk
parent 6649a0b6
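
The heart of the patch is a small piece of arithmetic: on gen4+ the GGTT view of a tiled object is rounded up to a whole fence tile row (stride x tile height), with the padding backed by scratch pages. Below is a minimal userspace sketch of that rounding, assuming gen4+ tile geometry (X tiles are 512 bytes x 8 rows, Y tiles 128 bytes x 32 rows, i.e. 4096 bytes either way); all names here are illustrative stand-ins, not the kernel's.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's tiling modes. */
enum tiling { TILING_NONE, TILING_X, TILING_Y };

/* Assumed gen4+ tile heights in rows: X = 512B x 8 rows, Y = 128B x 32
 * rows, so one tile holds 4096 bytes in both cases. */
static unsigned int tile_height(enum tiling mode)
{
	return mode == TILING_Y ? 32 : 8;
}

/* Mirror of the patch's gen4+ path in i915_gem_get_ggtt_size(): pad the
 * view out to a whole tile row so fenced detiling cannot read or write
 * past the object into a neighbour; the kernel backs the padding with
 * scratch pages. */
static uint64_t ggtt_size_gen4(uint64_t size, enum tiling mode,
			       unsigned int stride)
{
	uint64_t row;

	if (mode == TILING_NONE)
		return size;

	row = (uint64_t)stride * tile_height(mode);
	assert((row & 4095) == 0); /* a tile row must be page-aligned */
	return (size + row - 1) / row * row; /* roundup(size, row) */
}

int main(void)
{
	/* A 100000-byte X-tiled object with a 4096-byte stride: one row
	 * is 4096 * 8 = 32768 bytes, so the view grows to 4 rows, i.e.
	 * 131072 bytes. */
	printf("%" PRIu64 "\n", ggtt_size_gen4(100000, TILING_X, 4096));
	return 0;
}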
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3361,9 +3361,10 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
-			   int tiling_mode);
+			   int tiling_mode, unsigned int stride);
 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
-				int tiling_mode, bool fenced);
+				int tiling_mode, unsigned int stride,
+				bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2021,21 +2021,29 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
  * @dev_priv: i915 device
  * @size: object size
  * @tiling_mode: tiling mode
+ * @stride: tiling stride
  *
  * Return the required global GTT size for an object, taking into account
  * potential fence register mapping.
  */
 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
-			   u64 size, int tiling_mode)
+			   u64 size, int tiling_mode, unsigned int stride)
 {
 	u64 ggtt_size;
 
-	GEM_BUG_ON(size == 0);
+	GEM_BUG_ON(!size);
 
-	if (INTEL_GEN(dev_priv) >= 4 ||
-	    tiling_mode == I915_TILING_NONE)
+	if (tiling_mode == I915_TILING_NONE)
 		return size;
 
+	GEM_BUG_ON(!stride);
+
+	if (INTEL_GEN(dev_priv) >= 4) {
+		stride *= i915_gem_tile_height(tiling_mode);
+		GEM_BUG_ON(stride & 4095);
+		return roundup(size, stride);
+	}
+
 	/* Previous chips need a power-of-two fence region when tiling */
 	if (IS_GEN3(dev_priv))
 		ggtt_size = 1024*1024;
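
For contrast, the pre-gen4 branch that this hunk leaves in place keeps the old power-of-two fence semantics: start at the platform minimum (1 MiB on gen3, 512 KiB on gen2) and double until the object fits. A short sketch of that legacy sizing, with is_gen3 standing in for the kernel's IS_GEN3() check:

#include <stdint.h>

/* Sketch of the legacy (gen2/3) sizing kept above: fence regions must
 * be a power of two, at least 1 MiB on gen3 and 512 KiB on gen2,
 * doubled until the object fits. */
uint64_t ggtt_size_legacy(uint64_t size, int is_gen3)
{
	uint64_t ggtt_size = is_gen3 ? 1024 * 1024 : 512 * 1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}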
@@ -2053,15 +2061,17 @@ u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
  * @dev_priv: i915 device
  * @size: object size
  * @tiling_mode: tiling mode
+ * @stride: tiling stride
  * @fenced: is fenced alignment required or not
  *
  * Return the required global GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
-				int tiling_mode, bool fenced)
+				int tiling_mode, unsigned int stride,
+				bool fenced)
 {
-	GEM_BUG_ON(size == 0);
+	GEM_BUG_ON(!size);
 
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
@@ -2076,7 +2086,7 @@ u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
+	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode, stride);
 }
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
@@ -3696,7 +3706,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 		u32 fence_size;
 
 		fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
-						    i915_gem_object_get_tiling(obj));
+						    i915_gem_object_get_tiling(obj),
+						    i915_gem_object_get_stride(obj));
 		/* If the required space is larger than the available
 		 * aperture, we will not able to find a slot for the
 		 * object and unbinding the object now will be in
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -117,7 +117,8 @@ i915_tiling_ok(struct drm_i915_private *dev_priv,
 	return true;
 }
 
-static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
+static bool i915_vma_fence_prepare(struct i915_vma *vma,
+				   int tiling_mode, unsigned int stride)
 {
 	struct drm_i915_private *dev_priv = vma->vm->i915;
 	u32 size;
@@ -133,7 +134,7 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
 		return false;
 	}
 
-	size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
+	size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode, stride);
 	if (vma->node.size < size)
 		return false;
@@ -145,20 +146,17 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
 
 /* Make the current GTT allocation valid for the change in tiling. */
 static int
-i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
+i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
+			      int tiling_mode, unsigned int stride)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
 	int ret;
 
 	if (tiling_mode == I915_TILING_NONE)
 		return 0;
 
-	if (INTEL_GEN(dev_priv) >= 4)
-		return 0;
-
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (i915_vma_fence_prepare(vma, tiling_mode))
+		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
 			continue;
 
 		ret = i915_vma_unbind(vma);
@@ -255,7 +253,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		 * whilst executing a fenced command for an untiled object.
 		 */
 
-		err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
+		err = i915_gem_object_fence_prepare(obj,
+						    args->tiling_mode,
+						    args->stride);
 		if (!err) {
 			struct i915_vma *vma;
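
With the stride now threaded through i915_gem_set_tiling() into the prepare step, the logic amounts to: walk every VMA of the object and unbind any whose current GGTT node could not legally hold a fence under the new tiling parameters. A simplified model of that walk, over an array instead of the kernel's vma_list (struct toy_vma and both function names are invented for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Toy model of a bound VMA: where its GGTT node sits and how big it is. */
struct toy_vma {
	uint64_t node_start;
	uint64_t node_size;
	bool bound;
};

/* Stand-in for i915_vma_fence_prepare(): the existing node is reusable
 * only if it is at least the padded fence size and suitably aligned. */
static bool fence_prepare(const struct toy_vma *vma,
			  uint64_t fence_size, uint64_t fence_alignment)
{
	if (vma->node_size < fence_size)
		return false;
	return (vma->node_start & (fence_alignment - 1)) == 0;
}

/* Stand-in for i915_gem_object_fence_prepare(): unbind (here, just mark
 * unbound) every VMA whose node cannot satisfy the new tiling. */
void object_fence_prepare(struct toy_vma *vmas, size_t count,
			  uint64_t fence_size, uint64_t fence_alignment)
{
	for (size_t i = 0; i < count; i++) {
		if (fence_prepare(&vmas[i], fence_size, fence_alignment))
			continue;
		vmas[i].bound = false; /* would be i915_vma_unbind(vma) */
	}
}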
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -284,11 +284,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 
 	fence_size = i915_gem_get_ggtt_size(dev_priv,
 					    vma->size,
-					    i915_gem_object_get_tiling(obj));
+					    i915_gem_object_get_tiling(obj),
+					    i915_gem_object_get_stride(obj));
 	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
 						      vma->size,
 						      i915_gem_object_get_tiling(obj),
+						      i915_gem_object_get_stride(obj),
 						      true);
+	GEM_BUG_ON(!is_power_of_2(fence_alignment));
 
 	fenceable = (vma->node.size == fence_size &&
 		     (vma->node.start & (fence_alignment - 1)) == 0);
@@ -370,12 +373,15 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 	size = max(size, vma->size);
 	if (flags & PIN_MAPPABLE)
 		size = i915_gem_get_ggtt_size(dev_priv, size,
-					      i915_gem_object_get_tiling(obj));
+					      i915_gem_object_get_tiling(obj),
+					      i915_gem_object_get_stride(obj));
 
 	alignment = max(max(alignment, vma->display_alignment),
 			i915_gem_get_ggtt_alignment(dev_priv, size,
 						    i915_gem_object_get_tiling(obj),
+						    i915_gem_object_get_stride(obj),
 						    flags & PIN_MAPPABLE));
+	GEM_BUG_ON(!is_power_of_2(alignment));
 
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
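
The i915_vma.c hunks also add GEM_BUG_ON(!is_power_of_2(...)) guards: both the fenceable test and the insertion path mask against (alignment - 1), which only computes a modulo when the alignment is a power of two. A small sketch of that predicate (names illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True for non-zero powers of two, like the kernel's is_power_of_2(). */
bool is_pow2(uint64_t x)
{
	return x && (x & (x - 1)) == 0;
}

/* The fenceable test from __i915_vma_set_map_and_fenceable(): the node
 * must be exactly the padded fence size and start on a fence-alignment
 * boundary; the mask trick is only valid for a power-of-two alignment,
 * hence the new GEM_BUG_ON in the patch. */
bool vma_fenceable(uint64_t node_start, uint64_t node_size,
		   uint64_t fence_size, uint64_t fence_alignment)
{
	assert(is_pow2(fence_alignment));
	return node_size == fence_size &&
	       (node_start & (fence_alignment - 1)) == 0;
}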