Commit 05906dec authored by Bas Nieuwenhuizen, committed by Alex Deucher

drm/amdgpu: wait on page directory changes. v2

Page tables can be moved, so a page directory update can be necessary
for the current CS even if none of the BOs are moved. In that scenario
there is no fence between the SDMA0 and GFX rings, so we add one.

v2 (chk): rebased
Signed-off-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent b325a789
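
The pattern the diff implements: the page-directory update runs on the SDMA ring and returns a fence; that fence is cached in vm->page_directory_fence, and every later command submission syncs against it before running on the GFX ring, even when no BO in that submission moved. Below is a minimal stand-alone C model of the reference handling only, not kernel code: the struct fence here is a hypothetical refcounted stand-in, and the helper names (vm_update_page_directory, cs_sync_to_page_directory) merely mirror the roles of amdgpu_vm_update_page_directory(), amdgpu_bo_vm_update_pte() and amdgpu_vm_fini() in the diff.

/*
 * Stand-alone model of the fence-retention pattern in this commit
 * (user-space sketch with stand-in types, not kernel code).
 */
#include <stdio.h>
#include <stdlib.h>

struct fence {
        int refcount;
        unsigned seq;   /* which page-directory update this fence tracks */
};

static struct fence *fence_get(struct fence *f)
{
        if (f)
                f->refcount++;
        return f;
}

static void fence_put(struct fence *f)
{
        if (f && --f->refcount == 0)
                free(f);
}

struct vm {
        struct fence *page_directory_fence;     /* last PD update, may be NULL */
};

/* Models the PD update path: publish the new fence, drop the old one. */
static void vm_update_page_directory(struct vm *vm, unsigned seq)
{
        struct fence *fence = calloc(1, sizeof(*fence));

        if (!fence)
                return;
        fence->refcount = 1;
        fence->seq = seq;

        fence_put(vm->page_directory_fence);
        vm->page_directory_fence = fence_get(fence);
        fence_put(fence);       /* drop the local reference, keep the cached one */
}

/* Models the CS path: every submission waits on the cached PD fence. */
static void cs_sync_to_page_directory(struct vm *vm)
{
        if (vm->page_directory_fence)
                printf("CS waits for PD update %u\n",
                       vm->page_directory_fence->seq);
}

int main(void)
{
        struct vm vm = { .page_directory_fence = NULL };

        vm_update_page_directory(&vm, 1);       /* SDMA writes new PDEs */
        cs_sync_to_page_directory(&vm);         /* GFX CS must not run earlier */

        fence_put(vm.page_directory_fence);     /* amdgpu_vm_fini() equivalent */
        return 0;
}

The fence_get() taken before the local fence_put() is what keeps the cached fence alive after the update returns; the CS path only has to read vm->page_directory_fence, and the teardown path drops the last reference.
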
@@ -982,6 +982,7 @@ struct amdgpu_vm {
 	/* contains the page directory */
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
+	struct fence		*page_directory_fence;

 	/* array of page tables, one for each page directory entry */
 	struct amdgpu_vm_pt	*page_tables;
@@ -551,6 +551,10 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 	if (r)
 		return r;

+	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
+	if (r)
+		return r;
+
 	r = amdgpu_vm_clear_freed(adev, vm);
 	if (r)
 		return r;
@@ -495,7 +495,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				   &fence);
 		if (r)
 			goto error_free;
+
 		amdgpu_bo_fence(pd, fence, true);
+		fence_put(vm->page_directory_fence);
+		vm->page_directory_fence = fence_get(fence);
 		fence_put(fence);
 	}
@@ -1291,6 +1294,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		return -ENOMEM;
 	}

+	vm->page_directory_fence = NULL;
+
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM, 0,
 			     NULL, &vm->page_directory);
@@ -1339,6 +1344,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	kfree(vm->page_tables);

 	amdgpu_bo_unref(&vm->page_directory);
+	fence_put(vm->page_directory_fence);

 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		amdgpu_fence_unref(&vm->ids[i].flushed_updates);