Commit 2e2cfbe6 authored by Ben Skeggs

drm/nouveau/vm: reduce number of entry-points to vm_map()

Pretty much everywhere had to make the decision which to use, so it
makes a lot more sense to just have one entrypoint decide the path
to take instead.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 2510538f
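
For orientation, here is a condensed sketch of the single entry-point this patch introduces. The function and field names are taken from the vm_map() hunk below; the explanatory comments are editorial and the struct definitions are elided, so treat it as a reading aid rather than the exact committed code.

/*
 * Sketch of the consolidated entry-point: callers stop choosing a mapping
 * path themselves, nouveau_vm_map() inspects the nouveau_mem node instead.
 * node->size is kept in 4KiB units by the sgdma backends in this patch,
 * hence the << 12 to get a byte length for the sg paths.
 */
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	if (node->sg)
		/* backed by a scatter-gather table (ttm->sg) */
		nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
	else if (node->pages)
		/* backed by the backend's DMA address array */
		nouveau_vm_map_sg(vma, 0, node->size << 12, node);
	else
		/* neither set: VRAM node, handled by nouveau_vm_map_at() */
		nouveau_vm_map_at(vma, 0, node);
}

The call sites in nouveau_bo.c and nouveau_sgdma.c then collapse to plain nouveau_vm_map() calls, with the sgdma bind functions filling in node->sg, node->pages and node->size so the dispatch has something to decide on.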
@@ -131,9 +131,5 @@ void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
 void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
 void nouveau_vm_unmap(struct nouveau_vma *);
 void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
-		       struct nouveau_mem *);
-void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
-			     struct nouveau_mem *mem);
 
 #endif
@@ -72,13 +72,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 	vmm->flush(vm);
 }
 
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
-	nouveau_vm_map_at(vma, 0, node);
-}
-
-void
+static void
 nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 			struct nouveau_mem *mem)
 {
@@ -136,7 +130,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 	vmm->flush(vm);
 }
 
-void
+static void
 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		  struct nouveau_mem *mem)
 {
@@ -174,6 +168,18 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 	vmm->flush(vm);
 }
 
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
+	if (node->sg)
+		nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
+	else
+	if (node->pages)
+		nouveau_vm_map_sg(vma, 0, node->size << 12, node);
+	else
+		nouveau_vm_map_at(vma, 0, node);
+}
+
 void
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
@@ -965,11 +965,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
 	if (ret)
 		return ret;
 
-	if (mem->mem_type == TTM_PL_VRAM)
-		nouveau_vm_map(vma, node);
-	else
-		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
-
+	nouveau_vm_map(vma, node);
 	return 0;
 }
@@ -1147,19 +1143,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 		return;
 
 	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
+		    (new_mem->mem_type == TTM_PL_VRAM ||
+		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
 			nouveau_vm_map(vma, new_mem->mm_node);
-		} else
-		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
-		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
-			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
-				nouveau_vm_map_sg_table(vma, 0, new_mem->
-						  num_pages << PAGE_SHIFT,
-						  new_mem->mm_node);
-			else
-				nouveau_vm_map_sg(vma, 0, new_mem->
-						  num_pages << PAGE_SHIFT,
-						  new_mem->mm_node);
 		} else {
 			nouveau_vm_unmap(vma);
 		}
@@ -1535,7 +1522,6 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 		   struct nouveau_vma *vma)
 {
 	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
 	int ret;
 
 	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
@@ -1543,15 +1529,10 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
 	if (ret)
 		return ret;
 
-	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		 nvbo->page_shift == vma->vm->vmm->spg_shift) {
-		if (node->sg)
-			nouveau_vm_map_sg_table(vma, 0, size, node);
-		else
-			nouveau_vm_map_sg(vma, 0, size, node);
-	}
 
 	list_add_tail(&vma->head, &nvbo->vma_list);
 	vma->refcount = 1;
@@ -31,16 +31,17 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct nouveau_mem *node = mem->mm_node;
-	u64 size = mem->num_pages << 12;
 
 	if (ttm->sg) {
 		node->sg = ttm->sg;
-		nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+		node->pages = NULL;
 	} else {
+		node->sg = NULL;
 		node->pages = nvbe->ttm.dma_address;
-		nouveau_vm_map_sg(&node->vma[0], 0, size, node);
 	}
+	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
 
+	nouveau_vm_map(&node->vma[0], node);
 	nvbe->node = node;
 	return 0;
 }
@@ -67,9 +68,13 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 	struct nouveau_mem *node = mem->mm_node;
 
 	/* noop: bound in move_notify() */
 	if (ttm->sg) {
 		node->sg = ttm->sg;
-	} else
+		node->pages = NULL;
+	} else {
+		node->sg = NULL;
 		node->pages = nvbe->ttm.dma_address;
+	}
+	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
 	return 0;
 }
@@ -171,6 +171,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
+
 	node->page_shift = 12;
 
 	switch (nv_device(drm->device)->card_type) {