Commit 4eb085e4 authored by Matthew Wilcox

drm/vmwgfx: Convert to new IDA API

Reorder allocation to avoid an awkward lock/unlock/lock sequence.
Simpler code due to being able to use ida_alloc_max(), even if we can't
eliminate the driver's spinlock.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
parent 485258b4
...@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, ...@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
{ {
struct vmwgfx_gmrid_man *gman = struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv; (struct vmwgfx_gmrid_man *)man->priv;
int ret = 0;
int id; int id;
mem->mm_node = NULL; mem->mm_node = NULL;
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
return id;
spin_lock(&gman->lock); spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) { if (gman->max_gmr_pages > 0) {
gman->used_gmr_pages += bo->num_pages; gman->used_gmr_pages += bo->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
goto out_err_locked; goto nospace;
} }
do {
spin_unlock(&gman->lock);
if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
ret = -ENOMEM;
goto out_err;
}
spin_lock(&gman->lock);
ret = ida_get_new(&gman->gmr_ida, &id);
if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
ida_remove(&gman->gmr_ida, id);
ret = 0;
goto out_err_locked;
}
} while (ret == -EAGAIN);
if (likely(ret == 0)) {
mem->mm_node = gman; mem->mm_node = gman;
mem->start = id; mem->start = id;
mem->num_pages = bo->num_pages; mem->num_pages = bo->num_pages;
} else
goto out_err_locked;
spin_unlock(&gman->lock); spin_unlock(&gman->lock);
return 0; return 0;
out_err: nospace:
spin_lock(&gman->lock);
out_err_locked:
gman->used_gmr_pages -= bo->num_pages; gman->used_gmr_pages -= bo->num_pages;
spin_unlock(&gman->lock); spin_unlock(&gman->lock);
return ret; ida_free(&gman->gmr_ida, id);
return 0;
} }
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
...@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man, ...@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
(struct vmwgfx_gmrid_man *)man->priv; (struct vmwgfx_gmrid_man *)man->priv;
if (mem->mm_node) { if (mem->mm_node) {
ida_free(&gman->gmr_ida, mem->start);
spin_lock(&gman->lock); spin_lock(&gman->lock);
ida_remove(&gman->gmr_ida, mem->start);
gman->used_gmr_pages -= mem->num_pages; gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock); spin_unlock(&gman->lock);
mem->mm_node = NULL; mem->mm_node = NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment