Commit 3425df48 authored by Ben Skeggs

drm/nv50-nvc0: unmap buffers from the vm when they're evicted

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent a4154bbf
@@ -508,10 +508,12 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	int ret;
 
 	src_offset = old_mem->start << PAGE_SHIFT;
-	if (old_mem->mem_type == TTM_PL_VRAM)
-		src_offset = nvbo->vma.offset;
-	else
+	if (old_mem->mem_type == TTM_PL_VRAM) {
+		struct nouveau_vram *node = old_mem->mm_node;
+		src_offset = node->tmp_vma.offset;
+	} else {
 		src_offset += dev_priv->gart_info.aper_base;
+	}
 
 	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (new_mem->mem_type == TTM_PL_VRAM)
@@ -559,10 +561,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	int ret;
 
 	src_offset = old_mem->start << PAGE_SHIFT;
-	if (old_mem->mem_type == TTM_PL_VRAM)
-		src_offset = nvbo->vma.offset;
-	else
+	if (old_mem->mem_type == TTM_PL_VRAM) {
+		struct nouveau_vram *node = old_mem->mm_node;
+		src_offset = node->tmp_vma.offset;
+	} else {
 		src_offset += dev_priv->gart_info.aper_base;
+	}
 
 	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (new_mem->mem_type == TTM_PL_VRAM)
@@ -711,6 +715,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct nouveau_channel *chan;
 	int ret;
 
@@ -720,6 +725,21 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 	}
 
+	/* create temporary vma for old memory, this will get cleaned
+	 * up after ttm destroys the ttm_mem_reg
+	 */
+	if (dev_priv->card_type >= NV_50 && old_mem->mem_type == TTM_PL_VRAM) {
+		struct nouveau_vram *node = old_mem->mm_node;
+
+		ret = nouveau_vm_get(chan->vm, old_mem->num_pages << PAGE_SHIFT,
+				     nvbo->vma.node->type, NV_MEM_ACCESS_RO,
+				     &node->tmp_vma);
+		if (ret)
+			goto out;
+
+		nouveau_vm_map(&node->tmp_vma, node);
+	}
+
 	if (dev_priv->card_type < NV_50)
 		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
 	else
@@ -733,6 +753,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 						    no_wait_gpu, new_mem);
 	}
 
+out:
 	if (chan == dev_priv->channel)
 		mutex_unlock(&chan->mutex);
 	return ret;
@@ -811,7 +832,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	if (dev_priv->card_type < NV_50 || nvbo->no_vm)
+	if (dev_priv->card_type < NV_50)
 		return;
 
 	switch (new_mem->mem_type) {
@@ -820,6 +841,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 		break;
 	case TTM_PL_TT:
 	default:
+		nouveau_vm_unmap(&nvbo->vma);
 		break;
 	}
 }
...
@@ -69,6 +69,7 @@ struct nouveau_vram {
 	struct drm_device *dev;
 
 	struct nouveau_vma bar_vma;
+	struct nouveau_vma tmp_vma;
 	u8 page_shift;
 
 	struct list_head regions;
...
@@ -710,8 +710,14 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
 	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct nouveau_vram *node = mem->mm_node;
 	struct drm_device *dev = dev_priv->dev;
 
+	if (node->tmp_vma.node) {
+		nouveau_vm_unmap(&node->tmp_vma);
+		nouveau_vm_put(&node->tmp_vma);
+	}
+
 	vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
 }
...