Commit e17841b9 authored by Christian König, committed by Alex Deucher

drm/amdgpu: Revert "add spin lock to protect freed list in vm (v3)"

Not needed any more because we need to protect the elements on the list anyway.

This reverts commit dae6ecf9e6c9b677e577826c3ac665c6dd9c490b.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b5a5ec55
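
To illustrate the reasoning in the commit message, here is a minimal userspace sketch (not the driver code) contrasting the two drain patterns that the first hunk below switches between. The struct mapping, process(), fill() and the pthread mutex are hypothetical stand-ins for the driver's freed list, amdgpu_vm_bo_split_mapping() and vm->freed_lock; the sketch assumes the callers of these paths are already serialized by some outer lock (in the driver, presumably the VM reservation), which is what makes the per-list lock redundant.

/*
 * Hedged sketch only: hypothetical list, lock and helpers, not amdgpu code.
 * It assumes the caller of clear_freed() is serialized externally.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mapping {
        int id;
        struct mapping *next;
};

static struct mapping *freed_head;
static pthread_mutex_t freed_lock = PTHREAD_MUTEX_INITIALIZER; /* the lock being removed */

/* Stand-in for amdgpu_vm_bo_split_mapping(): may sleep, so no lock held across it. */
int process(struct mapping *m)
{
        printf("processing mapping %d\n", m->id);
        return 0;
}

/* Old pattern: per-list lock, dropped and re-taken around the sleeping call. */
int clear_freed_locked(void)
{
        pthread_mutex_lock(&freed_lock);
        while (freed_head) {
                struct mapping *m = freed_head;

                freed_head = m->next;
                pthread_mutex_unlock(&freed_lock);

                int r = process(m);
                free(m);
                if (r)
                        return r;

                pthread_mutex_lock(&freed_lock);
        }
        pthread_mutex_unlock(&freed_lock);
        return 0;
}

/* New pattern: callers are already serialized, so a plain walk is enough. */
int clear_freed(void)
{
        while (freed_head) {
                struct mapping *m = freed_head;

                freed_head = m->next;

                int r = process(m);
                free(m);
                if (r)
                        return r;
        }
        return 0;
}

static void fill(int n)
{
        for (int i = 0; i < n; i++) {
                struct mapping *m = malloc(sizeof(*m));

                m->id = i;
                m->next = freed_head;
                freed_head = m;
        }
}

int main(void)
{
        fill(3);
        clear_freed_locked();
        fill(3);
        return clear_freed();
}

In the locked variant the lock must be dropped and re-acquired around the call that can sleep; once the callers are serialized elsewhere, that churn and the lock itself can simply be removed, which is what the diff below does.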
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -971,22 +971,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 	struct amdgpu_bo_va_mapping *mapping;
 	int r;
 
-	spin_lock(&vm->freed_lock);
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
 			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
-		spin_unlock(&vm->freed_lock);
 		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
 					       0, NULL);
 		kfree(mapping);
 		if (r)
 			return r;
-		spin_lock(&vm->freed_lock);
 	}
-	spin_unlock(&vm->freed_lock);
 	return 0;
 }
@@ -1252,13 +1248,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
-	if (valid) {
-		spin_lock(&vm->freed_lock);
+	if (valid)
 		list_add(&mapping->list, &vm->freed);
-		spin_unlock(&vm->freed_lock);
-	} else {
+	else
 		kfree(mapping);
-	}
 
 	return 0;
 }
@@ -1291,9 +1284,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		interval_tree_remove(&mapping->it, &vm->va);
 		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
-		spin_lock(&vm->freed_lock);
 		list_add(&mapping->list, &vm->freed);
-		spin_unlock(&vm->freed_lock);
 	}
 
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
@@ -1357,7 +1348,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 	spin_lock_init(&vm->it_lock);
-	spin_lock_init(&vm->freed_lock);
 
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);