Commit 61335d7a authored by Thomas Hellstrom

drm/vmwgfx: Use an RBtree instead of linked list for MOB resources

With emulated coherent memory we need to be able to quickly look up
a resource from the MOB offset. Instead of traversing a linked list with
O(n) worst case, use an RBtree with O(log n) worst case complexity.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
parent b7468b15
...@@ -463,6 +463,7 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo) ...@@ -463,6 +463,7 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo); struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
WARN_ON(vmw_bo->dirty); WARN_ON(vmw_bo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo); vmw_bo_unmap(vmw_bo);
kfree(vmw_bo); kfree(vmw_bo);
} }
...@@ -479,6 +480,7 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) ...@@ -479,6 +480,7 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
struct vmw_buffer_object *vbo = &vmw_user_bo->vbo; struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
WARN_ON(vbo->dirty); WARN_ON(vbo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_unmap(vbo); vmw_bo_unmap(vbo);
ttm_prime_object_kfree(vmw_user_bo, prime); ttm_prime_object_kfree(vmw_user_bo, prime);
} }
...@@ -514,8 +516,7 @@ int vmw_bo_init(struct vmw_private *dev_priv, ...@@ -514,8 +516,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
memset(vmw_bo, 0, sizeof(*vmw_bo)); memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->base.priority = 3; vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT;
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size, ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement, ttm_bo_type_device, placement,
......
...@@ -100,7 +100,7 @@ struct vmw_fpriv { ...@@ -100,7 +100,7 @@ struct vmw_fpriv {
/** /**
* struct vmw_buffer_object - TTM buffer object with vmwgfx additions * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object * @base: The TTM buffer object
* @res_list: List of resources using this buffer object as a backing MOB * @res_tree: RB tree of resources using this buffer object as a backing MOB
* @pin_count: pin depth * @pin_count: pin depth
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation. * increased. May be decreased without reservation.
...@@ -111,7 +111,7 @@ struct vmw_fpriv { ...@@ -111,7 +111,7 @@ struct vmw_fpriv {
*/ */
struct vmw_buffer_object { struct vmw_buffer_object {
struct ttm_buffer_object base; struct ttm_buffer_object base;
struct list_head res_list; struct rb_root res_tree;
s32 pin_count; s32 pin_count;
atomic_t cpu_writers; atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */ /* Not ref-counted. Protected by binding_mutex */
...@@ -160,8 +160,8 @@ struct vmw_res_func; ...@@ -160,8 +160,8 @@ struct vmw_res_func;
* pin-count greater than zero. It is not on the resource LRU lists and its * pin-count greater than zero. It is not on the resource LRU lists and its
* backup buffer is pinned. Hence it can't be evicted. * backup buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable. * @func: Method vtable for this resource. Immutable.
 * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
* @mob_head: List head for the MOB backup list. Protected by @backup reserved.
* @binding_head: List head for the context binding list. Protected by * @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex * the @dev_priv::binding_mutex
* @res_free: The resource destructor. * @res_free: The resource destructor.
...@@ -182,8 +182,8 @@ struct vmw_resource { ...@@ -182,8 +182,8 @@ struct vmw_resource {
unsigned long backup_offset; unsigned long backup_offset;
unsigned long pin_count; unsigned long pin_count;
const struct vmw_res_func *func; const struct vmw_res_func *func;
struct rb_node mob_node;
struct list_head lru_head; struct list_head lru_head;
struct list_head mob_head;
struct list_head binding_head; struct list_head binding_head;
struct vmw_resource_dirty *dirty; struct vmw_resource_dirty *dirty;
void (*res_free) (struct vmw_resource *res); void (*res_free) (struct vmw_resource *res);
...@@ -737,7 +737,7 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, ...@@ -737,7 +737,7 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
*/ */
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res) static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{ {
return !list_empty(&res->mob_head); return !RB_EMPTY_NODE(&res->mob_node);
} }
/** /**
......
...@@ -40,11 +40,24 @@ ...@@ -40,11 +40,24 @@
void vmw_resource_mob_attach(struct vmw_resource *res) void vmw_resource_mob_attach(struct vmw_resource *res)
{ {
struct vmw_buffer_object *backup = res->backup; struct vmw_buffer_object *backup = res->backup;
struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
dma_resv_assert_held(res->backup->base.base.resv); dma_resv_assert_held(res->backup->base.base.resv);
res->used_prio = (res->res_dirty) ? res->func->dirty_prio : res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
res->func->prio; res->func->prio;
list_add_tail(&res->mob_head, &backup->res_list);
while (*new) {
struct vmw_resource *this =
container_of(*new, struct vmw_resource, mob_node);
parent = *new;
new = (res->backup_offset < this->backup_offset) ?
&((*new)->rb_left) : &((*new)->rb_right);
}
rb_link_node(&res->mob_node, parent, new);
rb_insert_color(&res->mob_node, &backup->res_tree);
vmw_bo_prio_add(backup, res->used_prio); vmw_bo_prio_add(backup, res->used_prio);
} }
...@@ -58,7 +71,8 @@ void vmw_resource_mob_detach(struct vmw_resource *res) ...@@ -58,7 +71,8 @@ void vmw_resource_mob_detach(struct vmw_resource *res)
dma_resv_assert_held(backup->base.base.resv); dma_resv_assert_held(backup->base.base.resv);
if (vmw_resource_mob_attached(res)) { if (vmw_resource_mob_attached(res)) {
list_del_init(&res->mob_head); rb_erase(&res->mob_node, &backup->res_tree);
RB_CLEAR_NODE(&res->mob_node);
vmw_bo_prio_del(backup, res->used_prio); vmw_bo_prio_del(backup, res->used_prio);
} }
} }
...@@ -204,8 +218,8 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, ...@@ -204,8 +218,8 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->res_free = res_free; res->res_free = res_free;
res->dev_priv = dev_priv; res->dev_priv = dev_priv;
res->func = func; res->func = func;
RB_CLEAR_NODE(&res->mob_node);
INIT_LIST_HEAD(&res->lru_head); INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->mob_head);
INIT_LIST_HEAD(&res->binding_head); INIT_LIST_HEAD(&res->binding_head);
res->id = -1; res->id = -1;
res->backup = NULL; res->backup = NULL;
...@@ -753,19 +767,20 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr) ...@@ -753,19 +767,20 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
*/ */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{ {
struct vmw_resource *res, *next;
struct ttm_validate_buffer val_buf = { struct ttm_validate_buffer val_buf = {
.bo = &vbo->base, .bo = &vbo->base,
.num_shared = 0 .num_shared = 0
}; };
dma_resv_assert_held(vbo->base.base.resv); dma_resv_assert_held(vbo->base.base.resv);
list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
if (!res->func->unbind) struct rb_node *node = vbo->res_tree.rb_node;
continue; struct vmw_resource *res =
container_of(node, struct vmw_resource, mob_node);
if (!WARN_ON_ONCE(!res->func->unbind))
(void) res->func->unbind(res, res->res_dirty, &val_buf);
(void) res->func->unbind(res, res->res_dirty, &val_buf);
res->backup_dirty = true; res->backup_dirty = true;
res->res_dirty = false; res->res_dirty = false;
vmw_resource_mob_detach(res); vmw_resource_mob_detach(res);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment