Commit 4e64e553 authored by Chris Wilson, committed by Daniel Vetter

drm: Improve drm_mm search (and fix topdown allocation) with rbtrees

The drm_mm range manager claimed to support top-down insertion, but it
was neither searching for the top-most hole that could fit the
allocation request nor fitting the request to the hole correctly.

In order to search the range efficiently, we create a secondary index
for the holes using either their size or their address. This index
allows us to find the smallest hole or the hole at the bottom or top of
the range efficiently, whilst keeping the hole stack to rapidly service
evictions.

v2: Search for holes both high and low. Rename flags to mode.
v3: Discover rb_entry_safe() and use it!
v4: Kerneldoc for enum drm_mm_insert_mode.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Sean Paul <seanpaul@chromium.org>
Cc: Lucas Stach <l.stach@pengutronix.de>
Cc: Christian Gmeiner <christian.gmeiner@gmail.com>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Stephen Warren <swarren@wwwdotorg.org>
Cc: Alexandre Courbot <gnurou@gmail.com>
Cc: Eric Anholt <eric@anholt.net>
Cc: Sinclair Yeh <syeh@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com> # vmwgfx
Reviewed-by: Lucas Stach <l.stach@pengutronix.de> #etnaviv
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20170202210438.28702-1-chris@chris-wilson.co.uk
parent 17aad8a3
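Every hunk below makes the same conversion: the separate search-flags/allocator-flags pair collapses into a single enum drm_mm_insert_mode argument, and the eviction scan takes the same mode. A minimal sketch of what a converted caller looks like, with the call signature taken from the hunks below and everything else (the function name, the topdown flag, the caller-held values) purely illustrative rather than from the patch:

/* Sketch only -- not part of this patch.  Shows how a caller picks an
 * insert mode and passes it to the new drm_mm_insert_node_in_range(),
 * whose argument order matches the converted call sites below.
 */
static int example_alloc(struct drm_mm *mm, struct drm_mm_node *node,
                         u64 size, u64 align, unsigned long color,
                         u64 start, u64 end, bool topdown)
{
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST; /* smallest fitting hole */

        if (topdown) /* stands in for TTM_PL_FLAG_TOPDOWN / PIN_HIGH in the drivers */
                mode = DRM_MM_INSERT_HIGH; /* highest hole, filled top-down */

        return drm_mm_insert_node_in_range(mm, node, size, align, color,
                                           start, end, mode);
}

DRM_MM_INSERT_LOW fills from the bottom of the range, and DRM_MM_INSERT_EVICT reuses the hole just opened by an eviction scan, as the etnaviv and i915 hunks below show.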
@@ -97,8 +97,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct drm_mm_node *node = mem->mm_node;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	enum drm_mm_insert_mode mode;
 	unsigned long fpfn, lpfn;
 	int r;
@@ -115,15 +114,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	else
 		lpfn = man->size;
-	if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
 	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
-						mem->page_alignment, 0,
-						fpfn, lpfn, sflags, aflags);
+	r = drm_mm_insert_node_in_range(&mgr->mm, node,
+					mem->num_pages, mem->page_alignment, 0,
+					fpfn, lpfn, mode);
 	spin_unlock(&mgr->lock);
 	if (!r) {
...
@@ -97,8 +97,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	struct amdgpu_vram_mgr *mgr = man->priv;
 	struct drm_mm *mm = &mgr->mm;
 	struct drm_mm_node *nodes;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	enum drm_mm_insert_mode mode;
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
 	unsigned i;
 	int r;
@@ -121,10 +120,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 	if (!nodes)
 		return -ENOMEM;
-	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
 	pages_left = mem->num_pages;
@@ -135,13 +133,11 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 		if (pages == pages_per_node)
 			alignment = pages_per_node;
-		else
-			sflags |= DRM_MM_SEARCH_BEST;
-		r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
-							alignment, 0,
-							place->fpfn, lpfn,
-							sflags, aflags);
+		r = drm_mm_insert_node_in_range(mm, &nodes[i],
+						pages, alignment, 0,
+						place->fpfn, lpfn,
+						mode);
 		if (unlikely(r))
 			goto error;
...
@@ -148,8 +148,8 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 			return -ENOSPC;
 		mutex_lock(&priv->linear_lock);
-		ret = drm_mm_insert_node(&priv->linear, node, size, align,
-					 DRM_MM_SEARCH_DEFAULT);
+		ret = drm_mm_insert_node_generic(&priv->linear, node,
+						 size, align, 0, 0);
 		mutex_unlock(&priv->linear_lock);
 		if (ret) {
 			kfree(node);
...
This diff is collapsed.
@@ -212,8 +212,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 		goto out_unlock;
 	}
-	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
-				 pages, 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
 	if (ret)
 		goto out_unlock;
...
@@ -108,6 +108,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 				   struct drm_mm_node *node, size_t size)
 {
 	struct etnaviv_vram_mapping *free = NULL;
+	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
 	int ret;
 	lockdep_assert_held(&mmu->lock);
@@ -119,9 +120,9 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		bool found;
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
-						  size, 0, mmu->last_iova, ~0UL,
-						  DRM_MM_SEARCH_DEFAULT);
+						  size, 0, 0,
+						  mmu->last_iova, U64_MAX,
+						  mode);
 		if (ret != -ENOSPC)
 			break;
@@ -136,7 +137,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		}
 		/* Try to retire some entries */
-		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0);
+		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
 		found = 0;
 		INIT_LIST_HEAD(&list);
@@ -188,6 +189,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 			list_del_init(&m->scan_node);
 		}
+		mode = DRM_MM_INSERT_EVICT;
 		/*
 		 * We removed enough mappings so that the new allocation will
 		 * succeed, retry the allocation one more time.
...
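The etnaviv hunk above is the pattern the new DRM_MM_INSERT_EVICT mode is designed for: the first attempt uses the caller's preferred placement, and after a scan-and-evict pass the retry switches to DRM_MM_INSERT_EVICT so the node lands in the hole that was just vacated. A condensed sketch of that loop, with the driver-specific eviction work stubbed out (evict_candidates() is a hypothetical stand-in for the drm_mm_scan_init()/unmap machinery, not a real function):

/* Sketch only -- condensed allocate-or-evict loop, not driver code. */
static bool evict_candidates(struct drm_mm *mm, u64 size); /* hypothetical helper */

static int alloc_or_evict_sketch(struct drm_mm *mm, struct drm_mm_node *node,
                                 u64 size, u64 start, u64 end)
{
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;

        for (;;) {
                int ret = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
                                                      start, end, mode);
                if (ret != -ENOSPC)
                        return ret;

                if (!evict_candidates(mm, size)) /* scan + unmap, as in the hunk above */
                        return -ENOSPC;

                /* Retry straight into the hole we just opened up. */
                mode = DRM_MM_INSERT_EVICT;
        }
}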
@@ -69,12 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
 		     struct drm_mm_node *node, u32 size)
 {
 	memset(node, 0, sizeof(*node));
-	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
-						   size, 0,
-						   I915_COLOR_UNEVICTABLE,
-						   0, ggtt->mappable_end,
-						   DRM_MM_SEARCH_DEFAULT,
-						   DRM_MM_CREATE_DEFAULT);
+	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+					   size, 0, I915_COLOR_UNEVICTABLE,
+					   0, ggtt->mappable_end,
+					   DRM_MM_INSERT_LOW);
 }
 static void
...
@@ -109,6 +109,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	}, **phase;
 	struct i915_vma *vma, *next;
 	struct drm_mm_node *node;
+	enum drm_mm_insert_mode mode;
 	int ret;
 	lockdep_assert_held(&vm->i915->drm.struct_mutex);
@@ -127,10 +128,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 * On each list, the oldest objects lie at the HEAD with the freshest
 	 * object on the TAIL.
 	 */
+	mode = DRM_MM_INSERT_BEST;
+	if (flags & PIN_HIGH)
+		mode = DRM_MM_INSERT_HIGH;
+	if (flags & PIN_MAPPABLE)
+		mode = DRM_MM_INSERT_LOW;
 	drm_mm_scan_init_with_range(&scan, &vm->mm,
 				    min_size, alignment, cache_level,
-				    start, end,
-				    flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
+				    start, end, mode);
 	/* Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
...
@@ -436,12 +436,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
 	if (IS_ERR(vma)) {
 		memset(&cache->node, 0, sizeof(cache->node));
-		ret = drm_mm_insert_node_in_range_generic
+		ret = drm_mm_insert_node_in_range
 			(&ggtt->base.mm, &cache->node,
 			 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 			 0, ggtt->mappable_end,
-			 DRM_MM_SEARCH_DEFAULT,
-			 DRM_MM_CREATE_DEFAULT);
+			 DRM_MM_INSERT_LOW);
 		if (ret) /* no inactive aperture space, use cpu reloc */
 			return NULL;
 	} else {
...
@@ -2748,12 +2748,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 		return ret;
 	/* Reserve a mappable slot for our lockless error capture */
-	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
-						  &ggtt->error_capture,
-						  PAGE_SIZE, 0,
-						  I915_COLOR_UNEVICTABLE,
-						  0, ggtt->mappable_end,
-						  0, 0);
+	ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+					  0, ggtt->mappable_end,
+					  DRM_MM_INSERT_LOW);
 	if (ret)
 		return ret;
@@ -3663,7 +3661,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 			u64 size, u64 alignment, unsigned long color,
 			u64 start, u64 end, unsigned int flags)
 {
-	u32 search_flag, alloc_flag;
+	enum drm_mm_insert_mode mode;
 	u64 offset;
 	int err;
@@ -3684,13 +3682,11 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
 		return -ENOSPC;
-	if (flags & PIN_HIGH) {
-		search_flag = DRM_MM_SEARCH_BELOW;
-		alloc_flag = DRM_MM_CREATE_TOP;
-	} else {
-		search_flag = DRM_MM_SEARCH_DEFAULT;
-		alloc_flag = DRM_MM_CREATE_DEFAULT;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (flags & PIN_HIGH)
+		mode = DRM_MM_INSERT_HIGH;
+	if (flags & PIN_MAPPABLE)
+		mode = DRM_MM_INSERT_LOW;
 	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
 	 * so we know that we always have a minimum alignment of 4096.
@@ -3702,10 +3698,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (alignment <= I915_GTT_MIN_ALIGNMENT)
 		alignment = 0;
-	err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
-						  size, alignment, color,
-						  start, end,
-						  search_flag, alloc_flag);
+	err = drm_mm_insert_node_in_range(&vm->mm, node,
+					  size, alignment, color,
+					  start, end, mode);
 	if (err != -ENOSPC)
 		return err;
@@ -3743,9 +3738,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 	if (err)
 		return err;
-	search_flag = DRM_MM_SEARCH_DEFAULT;
-	return drm_mm_insert_node_in_range_generic(&vm->mm, node,
-						   size, alignment, color,
-						   start, end,
-						   search_flag, alloc_flag);
+	return drm_mm_insert_node_in_range(&vm->mm, node,
+					   size, alignment, color,
+					   start, end, DRM_MM_INSERT_EVICT);
 }
@@ -55,9 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 		return -ENODEV;
 	mutex_lock(&dev_priv->mm.stolen_lock);
-	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
-					  alignment, start, end,
-					  DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+					  size, alignment, 0,
+					  start, end, DRM_MM_INSERT_BEST);
 	mutex_unlock(&dev_priv->mm.stolen_lock);
 	return ret;
...
@@ -54,8 +54,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
 	if (!p)
 		return ERR_PTR(-ENOMEM);
-	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
-				 npages, 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
 	if (ret) {
 		drm_free_large(p);
 		return ERR_PTR(ret);
...
@@ -45,8 +45,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 	if (WARN_ON(drm_mm_node_allocated(&vma->node)))
 		return 0;
-	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
-				 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
 	if (ret)
 		return ret;
...
@@ -22,23 +22,24 @@ static unsigned int max_iterations = 8192;
 static unsigned int max_prime = 128;
 enum {
-	DEFAULT,
-	TOPDOWN,
 	BEST,
+	BOTTOMUP,
+	TOPDOWN,
+	EVICT,
 };
 static const struct insert_mode {
 	const char *name;
-	unsigned int search_flags;
-	unsigned int create_flags;
+	enum drm_mm_insert_mode mode;
 } insert_modes[] = {
-	[DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
-	[TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
-	[BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
+	[BEST] = { "best", DRM_MM_INSERT_BEST },
+	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
+	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
+	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
 	{}
 }, evict_modes[] = {
-	{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
-	{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
+	{ "bottom-up", DRM_MM_INSERT_LOW },
+	{ "top-down", DRM_MM_INSERT_HIGH },
 	{}
 };
@@ -526,8 +527,7 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
 	err = drm_mm_insert_node_generic(mm, node,
 					 size, alignment, color,
-					 mode->search_flags,
-					 mode->create_flags);
+					 mode->mode);
 	if (err) {
 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
 		       size, alignment, color, mode->name, err);
@@ -547,7 +547,7 @@ static bool expect_insert_fail(struct drm_mm *mm, u64 size)
 	struct drm_mm_node tmp = {};
 	int err;
-	err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
+	err = drm_mm_insert_node(mm, &tmp, size);
 	if (likely(err == -ENOSPC))
 		return true;
@@ -753,11 +753,10 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
 {
 	int err;
-	err = drm_mm_insert_node_in_range_generic(mm, node,
-						  size, alignment, color,
-						  range_start, range_end,
-						  mode->search_flags,
-						  mode->create_flags);
+	err = drm_mm_insert_node_in_range(mm, node,
+					  size, alignment, color,
+					  range_start, range_end,
+					  mode->mode);
 	if (err) {
 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
 		       size, alignment, color, mode->name,
@@ -781,11 +780,10 @@ static bool expect_insert_in_range_fail(struct drm_mm *mm,
 	struct drm_mm_node tmp = {};
 	int err;
-	err = drm_mm_insert_node_in_range_generic(mm, &tmp,
-						  size, 0, 0,
-						  range_start, range_end,
-						  DRM_MM_SEARCH_DEFAULT,
-						  DRM_MM_CREATE_DEFAULT);
+	err = drm_mm_insert_node_in_range(mm, &tmp,
+					  size, 0, 0,
+					  range_start, range_end,
+					  0);
 	if (likely(err == -ENOSPC))
 		return true;
@@ -1324,7 +1322,7 @@ static int evict_something(struct drm_mm *mm,
 	drm_mm_scan_init_with_range(&scan, mm,
 				    size, alignment, 0,
 				    range_start, range_end,
-				    mode->create_flags);
+				    mode->mode);
 	if (!evict_nodes(&scan,
 			 nodes, order, count, false,
 			 &evict_list))
@@ -1332,8 +1330,7 @@ static int evict_something(struct drm_mm *mm,
 	memset(&tmp, 0, sizeof(tmp));
 	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
-					 mode->search_flags,
-					 mode->create_flags);
+					 DRM_MM_INSERT_EVICT);
 	if (err) {
 		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
 		       size, alignment);
@@ -1408,8 +1405,7 @@ static int igt_evict(void *ignored)
 	ret = -EINVAL;
 	drm_mm_init(&mm, 0, size);
 	for (n = 0; n < size; n++) {
-		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
@@ -1517,8 +1513,7 @@ static int igt_evict_range(void *ignored)
 	ret = -EINVAL;
 	drm_mm_init(&mm, 0, size);
 	for (n = 0; n < size; n++) {
-		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
-					 DRM_MM_SEARCH_DEFAULT);
+		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
 		if (err) {
 			pr_err("insert failed, step %d\n", n);
 			ret = err;
@@ -1904,7 +1899,7 @@ static int evict_color(struct drm_mm *mm,
 	drm_mm_scan_init_with_range(&scan, mm,
 				    size, alignment, color,
 				    range_start, range_end,
-				    mode->create_flags);
+				    mode->mode);
 	if (!evict_nodes(&scan,
 			 nodes, order, count, true,
 			 &evict_list))
@@ -1912,8 +1907,7 @@ static int evict_color(struct drm_mm *mm,
 	memset(&tmp, 0, sizeof(tmp));
 	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
-					 mode->search_flags,
-					 mode->create_flags);
+					 DRM_MM_INSERT_EVICT);
 	if (err) {
 		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
 		       size, alignment, color, err);
...
@@ -109,8 +109,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 	if (pool == AGP_TYPE) {
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    mem->size, 0,
-					    DRM_MM_SEARCH_DEFAULT);
+					    mem->size);
 		offset = item->mm_node.start;
 	} else {
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -122,8 +121,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 #else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    mem->size, 0,
-					    DRM_MM_SEARCH_DEFAULT);
+					    mem->size);
 		offset = item->mm_node.start;
 #endif
 	}
...
@@ -128,8 +128,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
 	if (!bo->mm)
 		return -ENOMEM;
-	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
-					 PAGE_SIZE, 0, 0, 0);
+	err = drm_mm_insert_node_generic(&tegra->mm,
+					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
 	if (err < 0) {
 		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
 			err);
...
@@ -54,9 +54,8 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node = NULL;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	struct drm_mm_node *node;
+	enum drm_mm_insert_mode mode;
 	unsigned long lpfn;
 	int ret;
@@ -68,16 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;
-	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	mode = DRM_MM_INSERT_BEST;
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		mode = DRM_MM_INSERT_HIGH;
 	spin_lock(&rman->lock);
-	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
-						  mem->page_alignment, 0,
-						  place->fpfn, lpfn,
-						  sflags, aflags);
+	ret = drm_mm_insert_node_in_range(mm, node,
+					  mem->num_pages,
+					  mem->page_alignment, 0,
+					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
 	if (unlikely(ret)) {
...
@@ -593,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
 	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
-				 dlist_count, 1, 0);
+				 dlist_count);
 	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
 	if (ret)
 		return ret;
...
@@ -141,8 +141,7 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
 	int ret, i;
 	u32 __iomem *dst_kernel;
-	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
-				 0);
+	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
 	if (ret) {
 		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
 			  ret);
...
@@ -514,9 +514,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	if (lbm_size) {
 		if (!vc4_state->lbm.allocated) {
 			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
-			ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
-						 &vc4_state->lbm,
-						 lbm_size, 32, 0);
+			ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+							 &vc4_state->lbm,
+							 lbm_size, 32, 0, 0);
 			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
 		} else {
 			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
...
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
 	if (mem->type == VIA_MEM_AGP)
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+					    tmpSize);
 	else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+					    tmpSize);
 	if (retval)
 		goto fail_alloc;
...
@@ -673,16 +673,10 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 	memset(info->node, 0, sizeof(*info->node));
 	spin_lock_bh(&man->lock);
-	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
-					 0, 0,
-					 DRM_MM_SEARCH_DEFAULT,
-					 DRM_MM_CREATE_DEFAULT);
+	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	if (ret) {
 		vmw_cmdbuf_man_process(man);
-		ret = drm_mm_insert_node_generic(&man->mm, info->node,
-						 info->page_size, 0, 0,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	}
 	spin_unlock_bh(&man->lock);
...
This diff is collapsed.