Commit 05a20d09 authored by Chris Wilson

drm/i915: Move map-and-fenceable tracking to the VMA

By moving map-and-fenceable tracking from the object to the VMA, we gain
fine-grained tracking and the ability to track individual fences on the VMA
(subsequent patch).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-16-chris@chris-wilson.co.uk
parent 9e53d9be
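
The shape of the change is easiest to see in isolation. The sketch below is illustrative only, not the driver source: the struct, the vma_* helper names, and the simplified flag macros are stand-ins mirroring what the patch introduces (I915_VMA_CAN_FENCE, i915_vma_is_map_and_fenceable(), __i915_vma_set_map_and_fenceable()). It shows the pattern being applied: a per-object boolean becomes a per-VMA flag bit, so each mapping of an object tracks its own mappable/fenceable state.

/*
 * Illustrative sketch only (not the driver source): a per-object
 * boolean (obj->map_and_fenceable) becomes a per-VMA flag bit.
 * Flag values mirror the patch; helper names are simplified.
 */
#include <stdbool.h>

#define VMA_GGTT	(1u << 8)	/* mirrors I915_VMA_GGTT */
#define VMA_CAN_FENCE	(1u << 9)	/* mirrors I915_VMA_CAN_FENCE */
#define VMA_CLOSED	(1u << 10)	/* mirrors I915_VMA_CLOSED */

struct vma {
	unsigned int flags;
};

/* Query helper, analogous to i915_vma_is_map_and_fenceable(). */
static inline bool vma_is_map_and_fenceable(const struct vma *vma)
{
	return vma->flags & VMA_CAN_FENCE;
}

/* Recompute on bind, analogous to __i915_vma_set_map_and_fenceable(). */
static void vma_update_map_and_fenceable(struct vma *vma,
					 bool mappable, bool fenceable)
{
	if (mappable && fenceable)
		vma->flags |= VMA_CAN_FENCE;
	else
		vma->flags &= ~VMA_CAN_FENCE;
}

/* On unbind the bit is simply cleared: vma->flags &= ~VMA_CAN_FENCE; */

Packing the state into the existing vma->flags word costs no extra storage and keeps the answer next to the mapping it describes, which is what the commit message's per-VMA fence tracking ("subsequent patch") builds on.
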
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2192,12 +2192,6 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int fence_dirty:1;
 
-	/**
-	 * Is the object at the current location in the gtt mappable and
-	 * fenceable? Used to avoid costly recalculations.
-	 */
-	unsigned int map_and_fenceable:1;
-
 	/**
 	 * Whether the current gtt mapping needs to be mappable (and isn't just
 	 * mappable by accident). Track pin and fault separate for a more
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2899,8 +2899,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!obj->pages);
 
-	if (i915_vma_is_ggtt(vma) &&
-	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+	if (i915_vma_is_map_and_fenceable(vma)) {
 		i915_gem_object_finish_gtt(obj);
 
 		/* release the fence reg _after_ flushing */
@@ -2909,6 +2908,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 			return ret;
 
 		__i915_vma_iounmap(vma);
+		vma->flags &= ~I915_VMA_CAN_FENCE;
 	}
 
 	if (likely(!vma->vm->closed)) {
@@ -2920,14 +2920,11 @@ int i915_vma_unbind(struct i915_vma *vma)
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-	if (i915_vma_is_ggtt(vma)) {
-		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
-			obj->map_and_fenceable = false;
-		} else if (vma->pages) {
-			sg_free_table(vma->pages);
-			kfree(vma->pages);
-		}
+	if (vma->pages != obj->pages) {
+		GEM_BUG_ON(!vma->pages);
+		sg_free_table(vma->pages);
+		kfree(vma->pages);
 	}
 	vma->pages = NULL;
 
 	/* Since the unbound list is global, only move to that list if
@@ -3703,8 +3700,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 static bool
 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	struct drm_i915_gem_object *obj = vma->obj;
-
 	if (!drm_mm_node_allocated(&vma->node))
 		return false;
 
@@ -3714,7 +3709,7 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	if (alignment && vma->node.start & (alignment - 1))
 		return true;
 
-	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
 		return true;
 
 	if (flags & PIN_OFFSET_BIAS &&
@@ -3736,10 +3731,10 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	u32 fence_size, fence_alignment;
 
 	fence_size = i915_gem_get_ggtt_size(dev_priv,
-					    obj->base.size,
+					    vma->size,
 					    i915_gem_object_get_tiling(obj));
 	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
-						      obj->base.size,
+						      vma->size,
 						      i915_gem_object_get_tiling(obj),
 						      true);
@@ -3749,7 +3744,10 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	mappable = (vma->node.start + fence_size <=
 		    dev_priv->ggtt.mappable_end);
 
-	obj->map_and_fenceable = mappable && fenceable;
+	if (mappable && fenceable)
+		vma->flags |= I915_VMA_CAN_FENCE;
+	else
+		vma->flags &= ~I915_VMA_CAN_FENCE;
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
@@ -3809,12 +3807,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 		WARN(i915_vma_is_pinned(vma),
 		     "bo is already pinned in ggtt with incorrect alignment:"
-		     " offset=%08x, req.alignment=%llx, req.map_and_fenceable=%d,"
-		     " obj->map_and_fenceable=%d\n",
-		     i915_ggtt_offset(vma),
-		     alignment,
+		     " offset=%08x, req.alignment=%llx,"
+		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
+		     i915_ggtt_offset(vma), alignment,
 		     !!(flags & PIN_MAPPABLE),
-		     obj->map_and_fenceable);
+		     i915_vma_is_map_and_fenceable(vma));
 		ret = i915_vma_unbind(vma);
 		if (ret)
 			return ERR_PTR(ret);
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -857,7 +857,6 @@ static bool
 eb_vma_misplaced(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	struct drm_i915_gem_object *obj = vma->obj;
 
 	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
 		!i915_vma_is_ggtt(vma));
@@ -878,7 +877,8 @@ eb_vma_misplaced(struct i915_vma *vma)
 		return true;
 
 	/* avoid costly ping-pong once a batch bo ended up non-mappable */
-	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+	    !i915_vma_is_map_and_fenceable(vma))
 		return !only_mappable_for_reloc(entry->flags);
 
 	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -130,7 +130,9 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 		     !is_power_of_2(vma->node.size) ||
 		     (vma->node.start & (vma->node.size - 1)),
 		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
-		     vma->node.start, obj->map_and_fenceable, vma->node.size);
+		     vma->node.start,
+		     i915_vma_is_map_and_fenceable(vma),
+		     vma->node.size);
 
 		if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
 			tile_width = 128;
@@ -389,9 +391,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 			return 0;
 		}
 	} else if (enable) {
-		if (WARN_ON(!obj->map_and_fenceable))
-			return -EINVAL;
-
 		reg = i915_find_fence_reg(dev);
 		if (IS_ERR(reg))
 			return PTR_ERR(reg);
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3671,7 +3671,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	assert_rpm_wakelock_held(to_i915(vma->vm->dev));
 	lockdep_assert_held(&vma->vm->dev->struct_mutex);
 
-	if (WARN_ON(!vma->obj->map_and_fenceable))
+	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
 		return IO_ERR_PTR(-ENODEV);
 
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -198,7 +198,8 @@ struct i915_vma {
 #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
 
 #define I915_VMA_GGTT	BIT(8)
-#define I915_VMA_CLOSED BIT(9)
+#define I915_VMA_CAN_FENCE BIT(9)
+#define I915_VMA_CLOSED BIT(10)
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -239,6 +240,11 @@ static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
 	return vma->flags & I915_VMA_GGTT;
 }
 
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_CAN_FENCE;
+}
+
 static inline bool i915_vma_is_closed(const struct i915_vma *vma)
 {
 	return vma->flags & I915_VMA_CLOSED;
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -134,7 +134,7 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
 	if (!vma)
 		return 0;
 
-	if (!obj->map_and_fenceable)
+	if (!i915_vma_is_map_and_fenceable(vma))
 		return 0;
 
 	if (IS_GEN3(dev_priv)) {
@@ -145,7 +145,7 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
 			goto bad;
 	}
 
-	size = i915_gem_get_ggtt_size(dev_priv, obj->base.size, tiling_mode);
+	size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
 	if (vma->node.size < size)
 		goto bad;
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2224,7 +2224,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 	 * framebuffer compression. For simplicity, we always install
 	 * a fence as the cost is not that onerous.
 	 */
-	if (view.type == I915_GGTT_VIEW_NORMAL) {
+	if (i915_vma_is_map_and_fenceable(vma)) {
 		ret = i915_gem_object_get_fence(obj);
 		if (ret == -EDEADLK) {
 			/*
@@ -2262,11 +2262,11 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
 
 	intel_fill_fb_ggtt_view(&view, fb, rotation);
+	vma = i915_gem_object_to_ggtt(obj, &view);
 
-	if (view.type == I915_GGTT_VIEW_NORMAL)
+	if (i915_vma_is_map_and_fenceable(vma))
 		i915_gem_object_unpin_fence(obj);
 
-	vma = i915_gem_object_to_ggtt(obj, &view);
 	i915_gem_object_unpin_from_display_plane(vma);
 }