Commit 625d988a authored by Chris Wilson

drm/i915: Extract reserving space in the GTT to a helper

Extract the drm_mm_reserve_node() call, together with the
i915_gem_evict_for_node() fallback, into its own routine so that it can be
shared rather than duplicated.

v2: Kerneldoc
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: igvt-g-dev@lists.01.org
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170111112312.31493-2-chris@chris-wilson.co.uk
parent e007b19d
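
The caller-side effect of the patch, condensed from the i915_vma.c hunk at
the end: the open-coded reserve, evict, retry sequence collapses into a
single call to the new helper. A sketch of the pattern, not a literal quote
of either tree:

	/* before: each caller open-coded the retry */
	ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
	if (ret) {
		ret = i915_gem_evict_for_vma(vma, flags);
		if (ret == 0)
			ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
	}

	/* after: one shared helper does reserve -> evict -> retry */
	ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
				   size, offset, obj->cache_level, flags);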
drivers/gpu/drm/i915/i915_drv.h
@@ -3469,7 +3469,8 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 					  unsigned cache_level,
 					  u64 start, u64 end,
 					  unsigned flags);
-int __must_check i915_gem_evict_for_vma(struct i915_vma *vma,
+int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+					 struct drm_mm_node *node,
 					 unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
drivers/gpu/drm/i915/i915_gem_evict.c
@@ -231,7 +231,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 
 /**
- * i915_gem_evict_for_vma - Evict vmas to make room for binding a new one
- * @target: address space and range to evict for
+ * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
+ * @vm: address space to evict from
+ * @target: range (and color) to evict for
  * @flags: additional flags to control the eviction algorithm
  *
  * This function will try to evict vmas that overlap the target node.
@@ -239,18 +240,20 @@ i915_gem_evict_something(struct i915_address_space *vm,
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
+int i915_gem_evict_for_node(struct i915_address_space *vm,
+			    struct drm_mm_node *target,
+			    unsigned int flags)
 {
 	LIST_HEAD(eviction_list);
 	struct drm_mm_node *node;
-	u64 start = target->node.start;
-	u64 end = start + target->node.size;
+	u64 start = target->start;
+	u64 end = start + target->size;
 	struct i915_vma *vma, *next;
 	bool check_color;
 	int ret = 0;
 
-	lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
-	trace_i915_gem_evict_vma(target, flags);
+	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+	trace_i915_gem_evict_node(vm, target, flags);
 
 	/* Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
@@ -258,18 +261,18 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
 	 * retiring.
 	 */
 	if (!(flags & PIN_NONBLOCK))
-		i915_gem_retire_requests(target->vm->i915);
+		i915_gem_retire_requests(vm->i915);
 
-	check_color = target->vm->mm.color_adjust;
+	check_color = vm->mm.color_adjust;
 	if (check_color) {
 		/* Expand search to cover neighbouring guard pages (or lack!) */
-		if (start > target->vm->start)
+		if (start > vm->start)
 			start -= I915_GTT_PAGE_SIZE;
-		if (end < target->vm->start + target->vm->total)
+		if (end < vm->start + vm->total)
 			end += I915_GTT_PAGE_SIZE;
 	}
 
-	drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
+	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
 		/* If we find any non-objects (!vma), we cannot evict them */
 		if (node->color == I915_COLOR_UNEVICTABLE) {
 			ret = -ENOSPC;
@@ -285,12 +288,12 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
 		 * those as well to make room for our guard pages.
 		 */
 		if (check_color) {
-			if (vma->node.start + vma->node.size == target->node.start) {
-				if (vma->node.color == target->node.color)
+			if (vma->node.start + vma->node.size == target->start) {
+				if (vma->node.color == target->color)
 					continue;
 			}
-			if (vma->node.start == target->node.start + target->node.size) {
-				if (vma->node.color == target->node.color)
+			if (vma->node.start == target->start + target->size) {
+				if (vma->node.color == target->color)
 					continue;
 			}
 		}
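
To make the color handling concrete: the one-page expansion of [start, end)
pulls abutting neighbours into the walk, and a neighbour is spared only when
its color matches the target's. A sketch with made-up offsets:

	/*
	 * target:    [0x20000, 0x30000), color A
	 * search:    [0x1f000, 0x31000) after the guard-page expansion
	 *
	 * neighbour  [0x10000, 0x20000), color A -> skipped (no guard needed)
	 * neighbour  [0x30000, 0x40000), color B -> evicted (guard page needed)
	 */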
drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3554,6 +3554,58 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 	return ret;
 }
 
+/**
+ * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
+ * @vm: the &struct i915_address_space
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
+ * @size: how much space to allocate inside the GTT,
+ *        must be #I915_GTT_PAGE_SIZE aligned
+ * @offset: where to insert inside the GTT,
+ *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
+ *          (@offset + @size) must fit within the address space
+ * @color: color to apply to node, if this node is not from a VMA,
+ *         color must be #I915_COLOR_UNEVICTABLE
+ * @flags: control search and eviction behaviour
+ *
+ * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
+ * the address space (using @size and @color). If the @node does not fit, it
+ * tries to evict any overlapping nodes from the GTT, including any
+ * neighbouring nodes if the colors do not match (to ensure guard pages between
+ * differing domains). See i915_gem_evict_for_node() for the gory details on
+ * the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
+ * evicting active overlapping objects, and any overlapping node that is pinned
+ * or marked as unevictable will also result in failure.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+			 struct drm_mm_node *node,
+			 u64 size, u64 offset, unsigned long color,
+			 unsigned int flags)
+{
+	int err;
+
+	GEM_BUG_ON(!size);
+	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
+	GEM_BUG_ON(range_overflows(offset, size, vm->total));
+
+	node->size = size;
+	node->start = offset;
+	node->color = color;
+
+	err = drm_mm_reserve_node(&vm->mm, node);
+	if (err != -ENOSPC)
+		return err;
+
+	err = i915_gem_evict_for_node(vm, node, flags);
+	if (err == 0)
+		err = drm_mm_reserve_node(&vm->mm, node);
+
+	return err;
+}
+
 /**
  * i915_gem_gtt_insert - insert a node into an address_space (GTT)
  * @vm: the &struct i915_address_space
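
A minimal caller sketch for the new helper, assuming a ggtt pointer in scope
as in the stolen-memory caller below; the node variable and the 1 MiB offset
are hypothetical, every other identifier comes from the hunks above:

	struct drm_mm_node node = {};
	int err;

	/* size and offset must satisfy the GEM_BUG_ON preconditions */
	err = i915_gem_gtt_reserve(&ggtt->base, &node,
				   2 * I915_GTT_PAGE_SIZE,	/* size */
				   SZ_1M,			/* offset */
				   I915_COLOR_UNEVICTABLE,	/* not a vma */
				   PIN_NONBLOCK);
	if (err)	/* -ENOSPC if the range holds a pinned/unevictable node */
		return err;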
drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -532,6 +532,11 @@ int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages);
 
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+			 struct drm_mm_node *node,
+			 u64 size, u64 offset, unsigned long color,
+			 unsigned int flags);
+
 int i915_gem_gtt_insert(struct i915_address_space *vm,
 			struct drm_mm_node *node,
 			u64 size, u64 alignment, unsigned long color,
drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -694,10 +694,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
-	vma->node.start = gtt_offset;
-	vma->node.size = size;
-	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+				   size, gtt_offset, obj->cache_level,
+				   0);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
 		goto err_pages;
drivers/gpu/drm/i915/i915_trace.h
@@ -450,9 +450,9 @@ TRACE_EVENT(i915_gem_evict_vm,
 	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_evict_vma,
-	    TP_PROTO(struct i915_vma *vma, unsigned int flags),
-	    TP_ARGS(vma, flags),
+TRACE_EVENT(i915_gem_evict_node,
+	    TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
+	    TP_ARGS(vm, node, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -464,11 +464,11 @@ TRACE_EVENT(i915_gem_evict_vma,
 			     ),
 
 	    TP_fast_assign(
-			   __entry->dev = vma->vm->i915->drm.primary->index;
-			   __entry->vm = vma->vm;
-			   __entry->start = vma->node.start;
-			   __entry->size = vma->node.size;
-			   __entry->color = vma->node.color;
+			   __entry->dev = vm->i915->drm.primary->index;
+			   __entry->vm = vm;
+			   __entry->start = node->start;
+			   __entry->size = node->size;
+			   __entry->color = node->color;
 			   __entry->flags = flags;
 			   ),
drivers/gpu/drm/i915/i915_vgpu.c
@@ -116,22 +116,20 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
 	memset(&bl_info, 0, sizeof(bl_info));
 }
 
-static int vgt_balloon_space(struct drm_mm *mm,
+static int vgt_balloon_space(struct i915_ggtt *ggtt,
 			     struct drm_mm_node *node,
 			     unsigned long start, unsigned long end)
 {
 	unsigned long size = end - start;
 
-	if (start == end)
+	if (start >= end)
 		return -EINVAL;
 
 	DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
 		 start, end, size / 1024);
 
-	node->start = start;
-	node->size = size;
-
-	return drm_mm_reserve_node(mm, node);
+	return i915_gem_gtt_reserve(&ggtt->base, node,
+				    size, start, I915_COLOR_UNEVICTABLE,
+				    0);
 }
 
 /**
@@ -214,10 +212,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 	/* Unmappable graphic memory ballooning */
 	if (unmappable_base > ggtt->mappable_end) {
-		ret = vgt_balloon_space(&ggtt->base.mm,
-					&bl_info.space[2],
-					ggtt->mappable_end,
-					unmappable_base);
+		ret = vgt_balloon_space(ggtt, &bl_info.space[2],
+					ggtt->mappable_end, unmappable_base);
 
 		if (ret)
 			goto err;
@@ -228,18 +224,15 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 	 * because it is reserved to the guard page.
 	 */
 	if (unmappable_end < ggtt_end - PAGE_SIZE) {
-		ret = vgt_balloon_space(&ggtt->base.mm,
-					&bl_info.space[3],
-					unmappable_end,
-					ggtt_end - PAGE_SIZE);
+		ret = vgt_balloon_space(ggtt, &bl_info.space[3],
+					unmappable_end, ggtt_end - PAGE_SIZE);
 		if (ret)
 			goto err;
 	}
 
 	/* Mappable graphic memory ballooning */
 	if (mappable_base > ggtt->base.start) {
-		ret = vgt_balloon_space(&ggtt->base.mm,
-					&bl_info.space[0],
+		ret = vgt_balloon_space(ggtt, &bl_info.space[0],
 					ggtt->base.start, mappable_base);
 
 		if (ret)
@@ -247,10 +240,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 	}
 
 	if (mappable_end < ggtt->mappable_end) {
-		ret = vgt_balloon_space(&ggtt->base.mm,
-					&bl_info.space[1],
-					mappable_end,
-					ggtt->mappable_end);
+		ret = vgt_balloon_space(ggtt, &bl_info.space[1],
+					mappable_end, ggtt->mappable_end);
 
 		if (ret)
 			goto err;
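
Because the ballooned nodes are reserved with I915_COLOR_UNEVICTABLE, a later
reservation that overlaps them fails outright instead of evicting them. A
sketch of that failure path (node, obj and offset_in_balloon are
hypothetical):

	/* i915_gem_evict_for_node() hits I915_COLOR_UNEVICTABLE -> -ENOSPC */
	err = i915_gem_gtt_reserve(&ggtt->base, &node,
				   I915_GTT_PAGE_SIZE, offset_in_balloon,
				   obj->cache_level, 0);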
drivers/gpu/drm/i915/i915_vma.c
@@ -419,17 +419,11 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 			goto err_unpin;
 		}
 
-		vma->node.start = offset;
-		vma->node.size = size;
-		vma->node.color = obj->cache_level;
-		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-		if (ret) {
-			ret = i915_gem_evict_for_vma(vma, flags);
-			if (ret == 0)
-				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-			if (ret)
-				goto err_unpin;
-		}
+		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+					   size, offset, obj->cache_level,
+					   flags);
+		if (ret)
+			goto err_unpin;
 	} else {
 		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
 					  size, alignment, obj->cache_level,