Commit 0b04d474 authored by Chris Wilson, committed by Daniel Vetter

drm: Compute tight evictions for drm_mm_scan

Compute the minimal required hole during scan and only evict those nodes
that overlap. This enables us to reduce the number of nodes we need to
evict to the bare minimum.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20161222083641.2691-31-chris@chris-wilson.co.uk
parent 268c6498
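The scan API this patch extends is driven as a two-pass protocol: feed nodes from an eviction LRU into drm_mm_scan_add_block() until it reports that a suitable hole can be assembled, then hand every scanned node back through drm_mm_scan_remove_block(), which after this patch flags only the nodes overlapping the minimal hole. A sketch of that protocol follows; the evict_entry wrapper, its two list links, and the lru list are hypothetical driver state, not part of this commit.

#include <drm/drm_mm.h>
#include <linux/list.h>

struct evict_entry {
	struct drm_mm_node node;
	struct list_head lru_link;	/* position on the driver's LRU */
	struct list_head scan_link;	/* position on the unwind list */
};

static bool try_evict_hole(struct drm_mm *mm, struct list_head *lru,
			   u64 size, u64 alignment, unsigned int flags)
{
	struct drm_mm_scan scan;
	struct evict_entry *e, *next;
	LIST_HEAD(scanned);
	bool found = false;

	drm_mm_scan_init_with_range(&scan, mm, size, alignment,
				    0 /* color */, 0, U64_MAX, flags);

	/* Pass 1: feed nodes, oldest first, until the scanner reports
	 * that a hole large enough for the request can be assembled.
	 */
	list_for_each_entry(e, lru, lru_link) {
		list_add(&e->scan_link, &scanned);
		if (drm_mm_scan_add_block(&scan, &e->node)) {
			found = true;
			break;
		}
	}

	/* Pass 2: every scanned node must be handed back; list_add()
	 * above makes this walk run in reverse order of addition, as
	 * the API requires. A false return means the node lies outside
	 * the hole the scanner chose and can be kept.
	 */
	list_for_each_entry_safe(e, next, &scanned, scan_link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node))
			list_del(&e->scan_link);
	}

	/* Whatever remains on @scanned is the minimal set of nodes that
	 * overlap the computed hole and must be evicted before allocating.
	 */
	return found;
}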
drivers/gpu/drm/drm_mm.c
@@ -718,10 +718,10 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  * @color: opaque tag value to use for the allocation
  * @start: start of the allowed range for the allocation
  * @end: end of the allowed range for the allocation
+ * @flags: flags to specify how the allocation will be performed afterwards
  *
  * This simply sets up the scanning routines with the parameters for the desired
- * hole. Note that there's no need to specify allocation flags, since they only
- * change the place a node is allocated from within a suitable hole.
+ * hole.
  *
  * Warning:
  * As long as the scan list is non-empty, no other operations than
@@ -733,7 +733,8 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 				 u64 alignment,
 				 unsigned long color,
 				 u64 start,
-				 u64 end)
+				 u64 end,
+				 unsigned int flags)
 {
 	DRM_MM_BUG_ON(start >= end);
 	DRM_MM_BUG_ON(!size || size > end - start);
@@ -744,6 +745,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 	scan->color = color;
 	scan->alignment = alignment;
 	scan->size = size;
+	scan->flags = flags;
 
 	DRM_MM_BUG_ON(end <= start);
 	scan->range_start = start;
@@ -778,7 +780,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 	DRM_MM_BUG_ON(node->mm != mm);
 	DRM_MM_BUG_ON(!node->allocated);
 	DRM_MM_BUG_ON(node->scanned_block);
-	node->scanned_block = 1;
+	node->scanned_block = true;
 	mm->scan_active++;
 
 	hole = list_prev_entry(node, node_list);
@@ -800,15 +802,53 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 	adj_start = max(col_start, scan->range_start);
 	adj_end = min(col_end, scan->range_end);
+	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
+		return false;
+
+	if (scan->flags == DRM_MM_CREATE_TOP)
+		adj_start = adj_end - scan->size;
+
+	if (scan->alignment) {
+		u64 rem;
+
+		div64_u64_rem(adj_start, scan->alignment, &rem);
+		if (rem) {
+			adj_start -= rem;
+			if (scan->flags != DRM_MM_CREATE_TOP)
+				adj_start += scan->alignment;
+			if (adj_start < max(col_start, scan->range_start) ||
+			    min(col_end, scan->range_end) - adj_start < scan->size)
+				return false;
+
+			if (adj_end <= adj_start ||
+			    adj_end - adj_start < scan->size)
+				return false;
+		}
+	}
 
-	if (check_free_hole(adj_start, adj_end,
-			    scan->size, scan->alignment)) {
+	if (mm->color_adjust) {
+		/* If allocations need adjusting due to neighbouring colours,
+		 * we do not have enough information to decide if we need
+		 * to evict nodes on either side of [adj_start, adj_end].
+		 * What almost works is
+		 * hit_start = adj_start + (hole_start - col_start);
+		 * hit_end = adj_start + scan->size + (hole_end - col_end);
+		 * but because the decision is only made on the final hole,
+		 * we may underestimate the required adjustments for an
+		 * interior allocation.
+		 */
 		scan->hit_start = hole_start;
 		scan->hit_end = hole_end;
-		return true;
+	} else {
+		scan->hit_start = adj_start;
+		scan->hit_end = adj_start + scan->size;
 	}
 
-	return false;
+	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
+	DRM_MM_BUG_ON(scan->hit_start < hole_start);
+	DRM_MM_BUG_ON(scan->hit_end > hole_end);
+	return true;
 }
 EXPORT_SYMBOL(drm_mm_scan_add_block);
@@ -836,7 +876,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
 	DRM_MM_BUG_ON(node->mm != scan->mm);
 	DRM_MM_BUG_ON(!node->scanned_block);
-	node->scanned_block = 0;
+	node->scanned_block = false;
 
 	DRM_MM_BUG_ON(!node->mm->scan_active);
 	node->mm->scan_active--;
@@ -846,7 +886,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
 	prev_node->hole_follows = node->scanned_preceeds_hole;
 	list_add(&node->node_list, &prev_node->node_list);
 
-	return (drm_mm_hole_node_end(node) > scan->hit_start &&
+	return (node->start + node->size > scan->hit_start &&
 		node->start < scan->hit_end);
 }
 EXPORT_SYMBOL(drm_mm_scan_remove_block);
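The bounds-and-alignment arithmetic added to drm_mm_scan_add_block() above is what makes the eviction "tight": top-down placement butts the candidate block against the end of the hole and rounds down to the alignment, while bottom-up rounds up. Below is a standalone, userspace-style sketch of the same rounding (a plain % standing in for the kernel's 64-bit-safe div64_u64_rem()), with a worked example in the comments; the function name fits is illustrative only.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: mirrors the scanner's placement arithmetic. */
static bool fits(uint64_t hole_start, uint64_t hole_end,
		 uint64_t size, uint64_t alignment, bool top_down)
{
	uint64_t adj_start = hole_start;

	if (hole_end - hole_start < size)
		return false;

	if (top_down)			/* DRM_MM_CREATE_TOP */
		adj_start = hole_end - size;

	if (alignment) {
		uint64_t rem = adj_start % alignment;

		if (rem) {
			adj_start -= rem;		/* top-down: round down */
			if (!top_down)
				adj_start += alignment;	/* bottom-up: round up */
		}
	}

	/* e.g. hole [3, 20), size 8, alignment 4:
	 *   bottom-up: 3 rounds up to 4,  block occupies [4, 12)  -> fits
	 *   top-down:  20 - 8 = 12,       block occupies [12, 20) -> fits
	 */
	return adj_start >= hole_start && hole_end - adj_start >= size;
}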
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -135,7 +135,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 	}
 
 	/* Try to retire some entries */
-	drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0);
+	drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0);
 
 	found = 0;
 	INIT_LIST_HEAD(&list);
drivers/gpu/drm/i915/i915_gem_evict.c
@@ -128,7 +128,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 */
 	drm_mm_scan_init_with_range(&scan, &vm->mm,
 				    min_size, alignment, cache_level,
-				    start, end);
+				    start, end,
+				    flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
 
 	if (flags & PIN_NONBLOCK)
 		phases[1] = NULL;
drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1199,7 +1199,7 @@ static bool evict_nothing(struct drm_mm *mm,
 	struct drm_mm_node *node;
 	unsigned int n;
 
-	drm_mm_scan_init(&scan, mm, 1, 0, 0);
+	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
 	for (n = 0; n < total_size; n++) {
 		e = &nodes[n];
 		list_add(&e->link, &evict_list);
@@ -1246,7 +1246,7 @@ static bool evict_everything(struct drm_mm *mm,
 	unsigned int n;
 	int err;
 
-	drm_mm_scan_init(&scan, mm, total_size, 0, 0);
+	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
 	for (n = 0; n < total_size; n++) {
 		e = &nodes[n];
 		list_add(&e->link, &evict_list);
@@ -1296,7 +1296,8 @@ static int evict_something(struct drm_mm *mm,
 	drm_mm_scan_init_with_range(&scan, mm,
 				    size, alignment, 0,
-				    range_start, range_end);
+				    range_start, range_end,
+				    mode->create_flags);
 	if (!evict_nodes(&scan,
 			 nodes, order, count,
 			 &evict_list))
@@ -1874,7 +1875,8 @@ static int evict_color(struct drm_mm *mm,
 	drm_mm_scan_init_with_range(&scan, mm,
 				    size, alignment, color,
-				    range_start, range_end);
+				    range_start, range_end,
+				    mode->create_flags);
 	if (!evict_nodes(&scan,
 			 nodes, order, count,
 			 &evict_list))
include/drm/drm_mm.h
@@ -120,6 +120,7 @@ struct drm_mm_scan {
 	struct drm_mm_node *prev_scanned_node;
 
 	unsigned long color;
+	unsigned int flags;
 };
 
 /**
@@ -388,11 +389,9 @@ __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 				 struct drm_mm *mm,
-				 u64 size,
-				 u64 alignment,
-				 unsigned long color,
-				 u64 start,
-				 u64 end);
+				 u64 size, u64 alignment, unsigned long color,
+				 u64 start, u64 end,
+				 unsigned int flags);
 
 /**
  * drm_mm_scan_init - initialize lru scanning
@@ -401,10 +400,10 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
  * @size: size of the allocation
  * @alignment: alignment of the allocation
  * @color: opaque tag value to use for the allocation
+ * @flags: flags to specify how the allocation will be performed afterwards
  *
  * This simply sets up the scanning routines with the parameters for the desired
- * hole. Note that there's no need to specify allocation flags, since they only
- * change the place a node is allocated from within a suitable hole.
+ * hole.
  *
  * Warning:
  * As long as the scan list is non-empty, no other operations than
@@ -414,10 +413,13 @@ static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
 				    struct drm_mm *mm,
 				    u64 size,
 				    u64 alignment,
-				    unsigned long color)
+				    unsigned long color,
+				    unsigned int flags)
 {
-	drm_mm_scan_init_with_range(scan, mm, size, alignment, color,
-				    0, U64_MAX);
+	drm_mm_scan_init_with_range(scan, mm,
+				    size, alignment, color,
+				    0, U64_MAX,
+				    flags);
 }
 
 bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
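With the extra parameter in place, the inline wrapper still covers the common no-range case. For illustration (assuming a struct drm_mm *mm and a u64 size already in scope), a full-range top-down scan is set up as:

	struct drm_mm_scan scan;

	drm_mm_scan_init(&scan, mm, size, 0 /* alignment */, 0 /* color */,
			 DRM_MM_CREATE_TOP);
	/* ...which expands to:
	 * drm_mm_scan_init_with_range(&scan, mm, size, 0, 0,
	 *			       0, U64_MAX, DRM_MM_CREATE_TOP);
	 */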