Commit 729eba33 authored by Ben Skeggs

drm/nouveau/mmu: add more general vmm free/node handling functions

Aside from being a nice cleanup, these will allow the upcoming direct
page mapping interfaces to play nicely with normal mappings.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 3968d692
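The change replaces two open-coded node splits in nvkm_uvmm_mthd_map() with a single nvkm_vmm_node_split() helper, adds remove/delete helpers for both the free and allocated-node red-black trees, and centralises neighbour coalescing in nvkm_vmm_node_merge(). As a rough illustration of the split technique itself, here is a minimal, self-contained user-space model; struct range, range_tail(), range_split() and all constants are invented for this sketch and are not part of the nouveau code:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct nvkm_vma: a [addr, addr+size) range. */
struct range {
	unsigned long long addr;
	unsigned long long size;
	int part;              /* carved out of a larger allocation */
	struct range *next;    /* stand-in for the vmm ordered list */
};

/* Split the tail of 'r' off into a new node, like nvkm_vma_tail(). */
static struct range *range_tail(struct range *r, unsigned long long tail)
{
	struct range *new = malloc(sizeof(*new));
	if (!new)
		return NULL;
	r->size -= tail;
	new->addr = r->addr + r->size;
	new->size = tail;
	new->part = 1;             /* set by the caller in the real code */
	new->next = r->next;
	r->next = new;
	return new;
}

/* Carve [addr, addr+size) out of 'r', like nvkm_vmm_node_split(). */
static struct range *range_split(struct range *r,
				 unsigned long long addr,
				 unsigned long long size)
{
	if (r->addr != addr) {
		/* Keep the head piece; 'r' becomes the piece at addr. */
		r = range_tail(r, r->size + r->addr - addr);
		if (!r)
			return NULL;
	}
	if (r->size != size) {
		/* Push the remainder beyond addr+size into its own node. */
		if (!range_tail(r, r->size - size))
			return NULL;   /* the real code merges 'r' back here */
	}
	return r;
}

int main(void)
{
	struct range head = { 0x0000, 0x10000, 0, NULL };
	struct range *mid = range_split(&head, 0x4000, 0x2000);

	for (struct range *r = &head; r; r = r->next)
		printf("[%#llx + %#llx) part=%d%s\n", r->addr, r->size,
		       r->part, r == mid ? "  <- split result" : "");
	return 0;
}

Running it prints three pieces, with the middle one exactly covering [0x4000, 0x6000), mirroring how nvkm_vmm_node_split() returns the node that covers precisely the requested range.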
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -134,23 +134,10 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
 			goto fail;
 		}
 
-		if (vma->addr != addr) {
-			const u64 tail = vma->size + vma->addr - addr;
-			if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
-				goto fail;
-			vma->part = true;
-			nvkm_vmm_node_insert(vmm, vma);
-		}
-
-		if (vma->size != size) {
-			const u64 tail = vma->size - size;
-			struct nvkm_vma *tmp;
-			if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
-				nvkm_vmm_unmap_region(vmm, vma);
-				goto fail;
-			}
-			tmp->part = true;
-			nvkm_vmm_node_insert(vmm, tmp);
+		vma = nvkm_vmm_node_split(vmm, vma, addr, size);
+		if (!vma) {
+			ret = -ENOMEM;
+			goto fail;
 		}
 	}
 	vma->busy = true;
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -767,6 +767,20 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
 	return new;
 }
 
+static inline void
+nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	rb_erase(&vma->tree, &vmm->free);
+}
+
+static inline void
+nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	nvkm_vmm_free_remove(vmm, vma);
+	list_del(&vma->head);
+	kfree(vma);
+}
+
 static void
 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
@@ -795,7 +809,21 @@ nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	rb_insert_color(&vma->tree, &vmm->free);
 }
 
-void
+static inline void
+nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	rb_erase(&vma->tree, &vmm->root);
+}
+
+static inline void
+nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+	nvkm_vmm_node_remove(vmm, vma);
+	list_del(&vma->head);
+	kfree(vma);
+}
+
+static void
 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
 	struct rb_node **ptr = &vmm->root.rb_node;
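The new helpers come in two strengths: the remove() forms only rb_erase() the node from the relevant tree, while the delete() forms also unlink it from the address-ordered list and kfree() it. Keeping a bare remove matters because a node whose address, the rb-tree key, is about to change must leave the tree before the mutation and be re-inserted afterwards, which is exactly what nvkm_vmm_node_merge() below does around its next->addr adjustment. A small self-contained sketch of that detach/mutate/re-insert discipline, using a sorted list as a stand-in for the rb-tree (struct entry, index_insert() and index_remove() are invented names):

#include <stdio.h>

/* Toy ordered index standing in for vmm->root: entries are kept sorted
 * by 'addr', so addr plays the role of the rb-tree key. */
struct entry {
	unsigned long long addr;
	struct entry *next;
};

static void index_insert(struct entry **head, struct entry *e)
{
	while (*head && (*head)->addr < e->addr)
		head = &(*head)->next;
	e->next = *head;
	*head = e;
}

static void index_remove(struct entry **head, struct entry *e)
{
	while (*head != e)
		head = &(*head)->next;
	*head = e->next;
}

int main(void)
{
	struct entry a = { 0x1000, NULL }, b = { 0x3000, NULL };
	struct entry *head = NULL;

	index_insert(&head, &a);
	index_insert(&head, &b);

	/* Changing a key in place would break the ordering invariant,
	 * so detach first, mutate, then re-insert. */
	index_remove(&head, &b);
	b.addr = 0x0800;
	index_insert(&head, &b);

	for (struct entry *e = head; e; e = e->next)
		printf("%#llx\n", e->addr);
	return 0;
}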
@@ -834,6 +862,78 @@ nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
 	return NULL;
 }
 
+#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :            \
+	list_entry((root)->head.dir, struct nvkm_vma, head))
+
+static struct nvkm_vma *
+nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
+		    struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
+{
+	if (next) {
+		if (vma->size == size) {
+			vma->size += next->size;
+			nvkm_vmm_node_delete(vmm, next);
+			if (prev) {
+				prev->size += vma->size;
+				nvkm_vmm_node_delete(vmm, vma);
+				return prev;
+			}
+			return vma;
+		}
+		BUG_ON(prev);
+
+		nvkm_vmm_node_remove(vmm, next);
+		vma->size -= size;
+		next->addr -= size;
+		next->size += size;
+		nvkm_vmm_node_insert(vmm, next);
+		return next;
+	}
+
+	if (prev) {
+		if (vma->size != size) {
+			nvkm_vmm_node_remove(vmm, vma);
+			prev->size += size;
+			vma->addr += size;
+			vma->size -= size;
+			nvkm_vmm_node_insert(vmm, vma);
+		} else {
+			prev->size += vma->size;
+			nvkm_vmm_node_delete(vmm, vma);
+		}
+		return prev;
+	}
+
+	return vma;
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_split(struct nvkm_vmm *vmm,
+		    struct nvkm_vma *vma, u64 addr, u64 size)
+{
+	struct nvkm_vma *prev = NULL;
+
+	if (vma->addr != addr) {
+		prev = vma;
+		if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
+			return NULL;
+		vma->part = true;
+		nvkm_vmm_node_insert(vmm, vma);
+	}
+
+	if (vma->size != size) {
+		struct nvkm_vma *tmp;
+		if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+			nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
+			return NULL;
+		}
+		tmp->part = true;
+		nvkm_vmm_node_insert(vmm, tmp);
+	}
+
+	return vma;
+}
+
 static void
 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 {
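nvkm_vmm_node_merge() covers three situations: with a next node and size equal to vma->size, vma swallows next (and is itself folded into prev when one is supplied); with a next node and a partial size, the trailing size bytes of vma are shifted across the boundary into next; with only a prev node, either the leading size bytes or all of vma are folded into prev. nvkm_vmm_node_split() builds on this: when its second nvkm_vma_tail() allocation fails, it calls nvkm_vmm_node_merge() to undo the head split, which is what lets nvkm_uvmm_mthd_map() drop its old explicit nvkm_vmm_unmap_region() error path. Below is a minimal user-space sketch of the boundary-shift case only; struct piece and shift_tail() are invented for illustration:

#include <assert.h>
#include <stdio.h>

/* Toy model of the boundary-shift case in nvkm_vmm_node_merge(): the
 * last 'size' bytes of 'vma' are handed over to 'next' instead of
 * creating a third node.  Names and types are illustrative only. */
struct piece { unsigned long long addr, size; };

static void shift_tail(struct piece *vma, struct piece *next,
		       unsigned long long size)
{
	/* In the real code 'next' is removed from the rb-tree first,
	 * because its address (the tree key) is about to change, and
	 * is re-inserted at its new position afterwards. */
	vma->size  -= size;
	next->addr -= size;
	next->size += size;
}

int main(void)
{
	struct piece vma  = { 0x0000, 0x6000 };
	struct piece next = { 0x6000, 0x2000 };

	shift_tail(&vma, &next, 0x1000);	/* move 0x1000 bytes across */

	assert(vma.addr + vma.size == next.addr);	/* still contiguous */
	printf("vma  [%#llx + %#llx)\n", vma.addr, vma.size);
	printf("next [%#llx + %#llx)\n", next.addr, next.size);
	return 0;
}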
@@ -954,37 +1054,20 @@ nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 	return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
 }
 
-#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL :             \
-	list_entry((root)->head.dir, struct nvkm_vma, head)
-
 void
 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
-	struct nvkm_vma *next;
+	struct nvkm_vma *next = node(vma, next);
+	struct nvkm_vma *prev = NULL;
 
 	nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
 	nvkm_memory_unref(&vma->memory);
 
-	if (vma->part) {
-		struct nvkm_vma *prev = node(vma, prev);
-		if (!prev->memory) {
-			prev->size += vma->size;
-			rb_erase(&vma->tree, &vmm->root);
-			list_del(&vma->head);
-			kfree(vma);
-			vma = prev;
-		}
-	}
-
-	next = node(vma, next);
-	if (next && next->part) {
-		if (!next->memory) {
-			vma->size += next->size;
-			rb_erase(&next->tree, &vmm->root);
-			list_del(&next->head);
-			kfree(next);
-		}
-	}
+	if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+		prev = NULL;
+	if (!next->part || next->memory)
+		next = NULL;
+	nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 }
 
 void
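The rewritten nvkm_vmm_unmap_region() now just computes which neighbours are eligible for merging (a part of the same allocation with no memory still attached) and hands them to nvkm_vmm_node_merge(). Its guard relies on a comma expression so that prev is only dereferenced when vma->part guarantees a predecessor exists. A small stand-alone sketch of that idiom; struct vma and its fields here are a toy model, not the nouveau structures:

#include <stdio.h>

/* 'memory' stands in for the struct nvkm_memory pointer: non-NULL
 * while a mapping is still attached to the node. */
struct vma {
	int part;              /* carved from a larger allocation */
	const char *memory;
	struct vma *prev;      /* stand-in for node(vma, prev)    */
};

/* '||' short-circuits, and the comma operator yields its right-hand
 * value, so prev is assigned and dereferenced only when vma->part
 * guarantees that a predecessor node exists. */
static struct vma *eligible_prev(struct vma *vma)
{
	struct vma *prev = NULL;
	if (!vma->part || ((prev = vma->prev), prev->memory))
		prev = NULL;
	return prev;
}

int main(void)
{
	struct vma free_head = { 0, NULL, NULL };
	struct vma used_head = { 0, "mapped", NULL };
	struct vma a = { 1, NULL, &free_head };   /* predecessor unmapped     */
	struct vma b = { 1, NULL, &used_head };   /* predecessor still mapped */

	printf("head itself:   %s\n", eligible_prev(&free_head) ? "merge" : "skip");
	printf("a (prev free): %s\n", eligible_prev(&a) ? "merge" : "skip");
	printf("b (prev used): %s\n", eligible_prev(&b) ? "merge" : "skip");
	return 0;
}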
@@ -1163,18 +1246,14 @@ nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	struct nvkm_vma *prev, *next;
 
 	if ((prev = node(vma, prev)) && !prev->used) {
-		rb_erase(&prev->tree, &vmm->free);
-		list_del(&prev->head);
 		vma->addr = prev->addr;
 		vma->size += prev->size;
-		kfree(prev);
+		nvkm_vmm_free_delete(vmm, prev);
 	}
 
 	if ((next = node(vma, next)) && !next->used) {
-		rb_erase(&next->tree, &vmm->free);
-		list_del(&next->head);
 		vma->size += next->size;
-		kfree(next);
+		nvkm_vmm_free_delete(vmm, next);
 	}
 
 	nvkm_vmm_free_insert(vmm, vma);
@@ -1250,7 +1329,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 	}
 
 	/* Remove VMA from the list of allocated nodes. */
-	rb_erase(&vma->tree, &vmm->root);
+	nvkm_vmm_node_remove(vmm, vma);
 
 	/* Merge VMA back into the free list. */
 	vma->page = NVKM_VMA_PAGE_NONE;
@@ -1357,7 +1436,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
 		tail = ALIGN_DOWN(tail, vmm->func->page_block);
 		if (addr <= tail && tail - addr >= size) {
-			rb_erase(&this->tree, &vmm->free);
+			nvkm_vmm_free_remove(vmm, this);
 			vma = this;
 			break;
 		}
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -157,6 +157,8 @@ int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
 		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
 		  const char *name, struct nvkm_vmm *);
 struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
+				     u64 addr, u64 size);
 int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
 			bool sparse, u8 page, u8 align, u64 size,
 			struct nvkm_vma **pvma);
@@ -165,7 +167,6 @@ void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
 void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
 
 struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
-void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
 
 int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
 		  u64, u64, void *, u32, struct lock_class_key *,