Commit de180033 authored by Chris Wilson

drm/i915: Record allocated vma size

Tracking the size of the VMA as allocated allows us to dramatically
reduce the complexity of later functions (like inserting the VMA in to
the drm_mm range manager).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-13-git-send-email-chris@chris-wilson.co.uk
parent a9f1481f
...@@ -2980,53 +2980,36 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj, ...@@ -2980,53 +2980,36 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
u64 alignment, u64 alignment,
u64 flags) u64 flags)
{ {
struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_i915_private *dev_priv = to_i915(dev);
u64 start, end;
u32 search_flag, alloc_flag;
struct i915_vma *vma; struct i915_vma *vma;
u64 start, end;
u64 min_alignment;
int ret; int ret;
if (i915_is_ggtt(vm)) { vma = ggtt_view ?
u32 fence_size, fence_alignment, unfenced_alignment; i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
u64 view_size; i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma))
if (WARN_ON(!ggtt_view)) return vma;
return ERR_PTR(-EINVAL);
size = max(size, vma->size);
view_size = i915_ggtt_view_size(obj, ggtt_view); if (flags & PIN_MAPPABLE)
size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);
fence_size = i915_gem_get_ggtt_size(dev_priv,
view_size, min_alignment =
obj->tiling_mode); i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
fence_alignment = i915_gem_get_ggtt_alignment(dev_priv, flags & PIN_MAPPABLE);
view_size, if (alignment == 0)
obj->tiling_mode, alignment = min_alignment;
true); if (alignment & (min_alignment - 1)) {
unfenced_alignment = i915_gem_get_ggtt_alignment(dev_priv, DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
view_size, alignment, min_alignment);
obj->tiling_mode, return ERR_PTR(-EINVAL);
false);
size = max(size, view_size);
if (flags & PIN_MAPPABLE)
size = max_t(u64, size, fence_size);
if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment;
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
ggtt_view ? ggtt_view->type : 0,
alignment);
return ERR_PTR(-EINVAL);
}
} else {
size = max_t(u64, size, obj->base.size);
alignment = 4096;
} }
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
end = vm->total;
end = vma->vm->total;
if (flags & PIN_MAPPABLE) if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->ggtt.mappable_end); end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G) if (flags & PIN_ZONE_4G)
...@@ -3037,8 +3020,7 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj, ...@@ -3037,8 +3020,7 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
* attempt to find space. * attempt to find space.
*/ */
if (size > end) { if (size > end) {
DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n", DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
ggtt_view ? ggtt_view->type : 0,
size, obj->base.size, size, obj->base.size,
flags & PIN_MAPPABLE ? "mappable" : "total", flags & PIN_MAPPABLE ? "mappable" : "total",
end); end);
...@@ -3051,31 +3033,27 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj, ...@@ -3051,31 +3033,27 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj); i915_gem_object_pin_pages(obj);
vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma))
goto err_unpin;
if (flags & PIN_OFFSET_FIXED) { if (flags & PIN_OFFSET_FIXED) {
uint64_t offset = flags & PIN_OFFSET_MASK; uint64_t offset = flags & PIN_OFFSET_MASK;
if (offset & (alignment - 1) || offset > end - size) {
if (offset & (alignment - 1) || offset + size > end) {
ret = -EINVAL; ret = -EINVAL;
goto err_vma; goto err_unpin;
} }
vma->node.start = offset; vma->node.start = offset;
vma->node.size = size; vma->node.size = size;
vma->node.color = obj->cache_level; vma->node.color = obj->cache_level;
ret = drm_mm_reserve_node(&vm->mm, &vma->node); ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
if (ret) { if (ret) {
ret = i915_gem_evict_for_vma(vma); ret = i915_gem_evict_for_vma(vma);
if (ret == 0) if (ret == 0)
ret = drm_mm_reserve_node(&vm->mm, &vma->node); ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
if (ret)
goto err_unpin;
} }
if (ret)
goto err_vma;
} else { } else {
u32 search_flag, alloc_flag;
if (flags & PIN_HIGH) { if (flags & PIN_HIGH) {
search_flag = DRM_MM_SEARCH_BELOW; search_flag = DRM_MM_SEARCH_BELOW;
alloc_flag = DRM_MM_CREATE_TOP; alloc_flag = DRM_MM_CREATE_TOP;
...@@ -3094,36 +3072,35 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj, ...@@ -3094,36 +3072,35 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
alignment = 0; alignment = 0;
search_free: search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
&vma->node,
size, alignment, size, alignment,
obj->cache_level, obj->cache_level,
start, end, start, end,
search_flag, search_flag,
alloc_flag); alloc_flag);
if (ret) { if (ret) {
ret = i915_gem_evict_something(vm, size, alignment, ret = i915_gem_evict_something(vma->vm, size, alignment,
obj->cache_level, obj->cache_level,
start, end, start, end,
flags); flags);
if (ret == 0) if (ret == 0)
goto search_free; goto search_free;
goto err_vma; goto err_unpin;
} }
} }
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level)); GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_move_tail(&vma->vm_link, &vm->inactive_list); list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
obj->bind_count++; obj->bind_count++;
return vma; return vma;
err_vma:
vma = ERR_PTR(ret);
err_unpin: err_unpin:
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
return vma; return ERR_PTR(ret);
} }
bool bool
......
...@@ -184,7 +184,7 @@ static void ppgtt_unbind_vma(struct i915_vma *vma) ...@@ -184,7 +184,7 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
{ {
vma->vm->clear_range(vma->vm, vma->vm->clear_range(vma->vm,
vma->node.start, vma->node.start,
vma->obj->base.size, vma->size,
true); true);
} }
...@@ -2695,28 +2695,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, ...@@ -2695,28 +2695,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma) static void ggtt_unbind_vma(struct i915_vma *vma)
{ {
struct drm_device *dev = vma->vm->dev; struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
struct drm_i915_private *dev_priv = to_i915(dev); const u64 size = min(vma->size, vma->node.size);
struct drm_i915_gem_object *obj = vma->obj;
const uint64_t size = min_t(uint64_t,
obj->base.size,
vma->node.size);
if (vma->bound & GLOBAL_BIND) { if (vma->bound & GLOBAL_BIND)
vma->vm->clear_range(vma->vm, vma->vm->clear_range(vma->vm,
vma->node.start, vma->node.start, size,
size,
true); true);
}
if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
if (vma->bound & LOCAL_BIND && appgtt)
appgtt->base.clear_range(&appgtt->base, appgtt->base.clear_range(&appgtt->base,
vma->node.start, vma->node.start, size,
size,
true); true);
}
} }
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
...@@ -3374,14 +3364,14 @@ void i915_vma_close(struct i915_vma *vma) ...@@ -3374,14 +3364,14 @@ void i915_vma_close(struct i915_vma *vma)
static struct i915_vma * static struct i915_vma *
__i915_gem_vma_create(struct drm_i915_gem_object *obj, __i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm, struct i915_address_space *vm,
const struct i915_ggtt_view *ggtt_view) const struct i915_ggtt_view *view)
{ {
struct i915_vma *vma; struct i915_vma *vma;
int i; int i;
GEM_BUG_ON(vm->closed); GEM_BUG_ON(vm->closed);
if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) if (WARN_ON(i915_is_ggtt(vm) != !!view))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL); vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
...@@ -3395,12 +3385,22 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj, ...@@ -3395,12 +3385,22 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
list_add(&vma->vm_link, &vm->unbound_list); list_add(&vma->vm_link, &vm->unbound_list);
vma->vm = vm; vma->vm = vm;
vma->obj = obj; vma->obj = obj;
vma->size = obj->base.size;
vma->is_ggtt = i915_is_ggtt(vm); vma->is_ggtt = i915_is_ggtt(vm);
if (i915_is_ggtt(vm)) if (i915_is_ggtt(vm)) {
vma->ggtt_view = *ggtt_view; vma->ggtt_view = *view;
else if (view->type == I915_GGTT_VIEW_PARTIAL) {
vma->size = view->params.partial.size;
vma->size <<= PAGE_SHIFT;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size =
intel_rotation_info_size(&view->params.rotated);
vma->size <<= PAGE_SHIFT;
}
} else {
i915_ppgtt_get(i915_vm_to_ppgtt(vm)); i915_ppgtt_get(i915_vm_to_ppgtt(vm));
}
list_add_tail(&vma->obj_link, &obj->vma_list); list_add_tail(&vma->obj_link, &obj->vma_list);
...@@ -3685,29 +3685,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, ...@@ -3685,29 +3685,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return 0; return 0;
} }
/**
* i915_ggtt_view_size - Get the size of a GGTT view.
* @obj: Object the view is of.
* @view: The view in question.
*
* @return The size of the GGTT view in bytes.
*/
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
return obj->base.size;
}
}
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{ {
void __iomem *ptr; void __iomem *ptr;
......
...@@ -180,6 +180,7 @@ struct i915_vma { ...@@ -180,6 +180,7 @@ struct i915_vma {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_address_space *vm; struct i915_address_space *vm;
void __iomem *iomap; void __iomem *iomap;
u64 size;
unsigned int active; unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES]; struct i915_gem_active last_read[I915_NUM_ENGINES];
...@@ -608,10 +609,6 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a, ...@@ -608,10 +609,6 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
return true; return true;
} }
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
/** /**
* i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
* @vma: VMA to iomap * @vma: VMA to iomap
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment