Commit e44a0fe6 authored by Christian König, committed by Alex Deucher

drm/amdgpu: rework reserved VMID handling

Instead of reserving a VMID for a single process, allow multiple
processes to share the reserved ID. This allows for proper isolation
between the processes.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 053499f7
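The commit message above describes the new life cycle in one sentence; the sketch below is a minimal user-space model of it, not kernel code: vmid_mgr, reserve_vmid() and unreserve_vmid() are simplified stand-ins for amdgpu_vmid_mgr, amdgpu_vmid_alloc_reserved() and amdgpu_vmid_free_reserved(), the LRU is a plain array instead of a list_head, and the model clears its shared pointer on the last release as its own bookkeeping. The point it illustrates: the first reservation pulls one ID out of round-robin use, later reservations from other processes only bump a use count, and only the last release returns the ID to the LRU.

/*
 * Minimal user-space model of the reworked reserved-VMID handling.
 * Illustration only: names and data structures are simplified stand-ins
 * for the kernel code in amdgpu_ids.c.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_IDS 8

struct vmid {
	int hw_id;
	bool on_lru;			/* still part of round robin handling? */
};

struct vmid_mgr {
	struct vmid ids[NUM_IDS];
	int lru[NUM_IDS];		/* indices into ids[], front = least recently used */
	int lru_len;
	struct vmid *reserved;		/* the single shared reserved ID */
	unsigned int reserved_use_count;/* how many VMs currently hold it */
};

struct vm {
	bool reserved_vmid;		/* per-VM flag, was a per-VM amdgpu_vmid pointer */
};

/* Pop the least recently used ID from the round robin list. */
static struct vmid *lru_pop_front(struct vmid_mgr *mgr)
{
	struct vmid *id = &mgr->ids[mgr->lru[0]];

	for (int i = 1; i < mgr->lru_len; i++)
		mgr->lru[i - 1] = mgr->lru[i];
	mgr->lru_len--;
	id->on_lru = false;
	return id;
}

/* Put an ID back at the tail of the round robin list. */
static void lru_push_back(struct vmid_mgr *mgr, struct vmid *id)
{
	mgr->lru[mgr->lru_len++] = (int)(id - mgr->ids);
	id->on_lru = true;
}

/* Modeled after amdgpu_vmid_alloc_reserved(): first caller grabs an ID, others share it. */
static void reserve_vmid(struct vmid_mgr *mgr, struct vm *vm)
{
	if (vm->reserved_vmid)
		return;			/* this VM already holds the reservation */

	mgr->reserved_use_count++;
	if (!mgr->reserved)
		mgr->reserved = lru_pop_front(mgr); /* remove from round robin handling */
	vm->reserved_vmid = true;
}

/* Modeled after amdgpu_vmid_free_reserved(): only the last user returns the ID. */
static void unreserve_vmid(struct vmid_mgr *mgr, struct vm *vm)
{
	if (vm->reserved_vmid && --mgr->reserved_use_count == 0) {
		lru_push_back(mgr, mgr->reserved);
		mgr->reserved = NULL;	/* model-only bookkeeping so a later reserve pulls a fresh ID */
	}
	vm->reserved_vmid = false;
}

int main(void)
{
	struct vmid_mgr mgr = { .lru_len = NUM_IDS };
	struct vm a = { 0 }, b = { 0 };

	for (int i = 0; i < NUM_IDS; i++) {
		mgr.ids[i].hw_id = i;
		mgr.ids[i].on_lru = true;
		mgr.lru[i] = i;
	}

	reserve_vmid(&mgr, &a);		/* pulls VMID 0 out of the LRU */
	reserve_vmid(&mgr, &b);		/* shares the same VMID, only bumps the count */
	printf("shared id %d, users %u\n",
	       mgr.reserved->hw_id, mgr.reserved_use_count);

	unreserve_vmid(&mgr, &a);
	assert(mgr.reserved);		/* still reserved, b holds it */
	unreserve_vmid(&mgr, &b);
	assert(!mgr.reserved && mgr.ids[0].on_lru); /* back in round robin */
	return 0;
}

Compiled and run, the sketch prints "shared id 0, users 2" and both asserts hold, mirroring how only the final amdgpu_vmid_free_reserved() call re-adds the reserved ID to ids_lru.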
@@ -278,12 +278,13 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	bool needs_flush = vm->use_cpu_for_update;
 	uint64_t updates = amdgpu_vm_tlb_seq(vm);
 	int r;
 
-	*id = vm->reserved_vmid[vmhub];
+	*id = id_mgr->reserved;
 	if ((*id)->owner != vm->immediate.fence_context ||
 	    !amdgpu_vmid_compatible(*id, job) ||
 	    (*id)->flushed_updates < updates ||
@@ -462,31 +463,27 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       unsigned vmhub)
 {
-	struct amdgpu_vmid_mgr *id_mgr;
-	struct amdgpu_vmid *idle;
-	int r = 0;
+	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
-	id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	mutex_lock(&id_mgr->lock);
 	if (vm->reserved_vmid[vmhub])
 		goto unlock;
-	if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
-	    AMDGPU_VM_MAX_RESERVED_VMID) {
-		DRM_ERROR("Over limitation of reserved vmid\n");
-		atomic_dec(&id_mgr->reserved_vmid_num);
-		r = -EINVAL;
-		goto unlock;
+
+	++id_mgr->reserved_use_count;
+	if (!id_mgr->reserved) {
+		struct amdgpu_vmid *id;
+
+		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
+				      list);
+		/* Remove from normal round robin handling */
+		list_del_init(&id->list);
+		id_mgr->reserved = id;
 	}
-	/* Select the first entry VMID */
-	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
-	list_del_init(&idle->list);
-	vm->reserved_vmid[vmhub] = idle;
-	mutex_unlock(&id_mgr->lock);
-	return 0;
+
+	vm->reserved_vmid[vmhub] = true;
+
 unlock:
 	mutex_unlock(&id_mgr->lock);
-	return r;
+	return 0;
 }
 
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
@@ -496,12 +493,12 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub]) {
-		list_add(&vm->reserved_vmid[vmhub]->list,
-			&id_mgr->ids_lru);
-		vm->reserved_vmid[vmhub] = NULL;
-		atomic_dec(&id_mgr->reserved_vmid_num);
+	if (vm->reserved_vmid[vmhub] &&
+	    !--id_mgr->reserved_use_count) {
+		/* give the reserved ID back to normal round robin */
+		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
 	}
+	vm->reserved_vmid[vmhub] = false;
 	mutex_unlock(&id_mgr->lock);
 }
@@ -568,7 +565,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 
 		mutex_init(&id_mgr->lock);
 		INIT_LIST_HEAD(&id_mgr->ids_lru);
-		atomic_set(&id_mgr->reserved_vmid_num, 0);
+		id_mgr->reserved_use_count = 0;
 
 		/* manage only VMIDs not used by KFD */
 		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
...
@@ -67,7 +67,8 @@ struct amdgpu_vmid_mgr {
 	unsigned		num_ids;
 	struct list_head	ids_lru;
 	struct amdgpu_vmid	ids[AMDGPU_NUM_VMID];
-	atomic_t		reserved_vmid_num;
+	struct amdgpu_vmid	*reserved;
+	unsigned int		reserved_use_count;
 };
 
 int amdgpu_pasid_alloc(unsigned int bits);
...
@@ -119,9 +119,6 @@ struct amdgpu_bo_vm;
 /* Reserve 2MB at top/bottom of address space for kernel use */
 #define AMDGPU_VA_RESERVED_SIZE			(2ULL << 20)
 
-/* max vmids dedicated for process */
-#define AMDGPU_VM_MAX_RESERVED_VMID	1
-
 /* See vm_update_mode */
 #define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)
@@ -298,8 +295,7 @@ struct amdgpu_vm {
 	struct dma_fence	*last_unlocked;
 
 	unsigned int		pasid;
-	/* dedicated to vm */
-	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];
+	bool			reserved_vmid[AMDGPU_MAX_VMHUBS];
 
 	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
 	bool			use_cpu_for_update;
...
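The uAPI path that ends up in amdgpu_vmid_alloc_reserved()/amdgpu_vmid_free_reserved() is not part of this diff. For context, a hedged user-space sketch of how a process requests and releases such a reservation through the existing DRM_IOCTL_AMDGPU_VM ioctl and its AMDGPU_VM_OP_RESERVE_VMID / AMDGPU_VM_OP_UNRESERVE_VMID ops follows; the render-node path and the header location are assumptions about the build environment, and error handling is minimal.

/*
 * Sketch: reserve and release a VMID for the calling process's VM via the
 * existing amdgpu VM ioctl.  Assumes an amdgpu render node at
 * /dev/dri/renderD128; the uAPI header may live under drm/ (kernel headers)
 * or libdrm/ depending on the system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

int main(void)
{
	union drm_amdgpu_vm args;
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0) {
		perror("open render node");
		return 1;
	}

	/* Ask the kernel to back this process's VM with the reserved VMID. */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_VM_OP_RESERVE_VMID;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args))
		perror("reserve vmid");

	/* ... submit work that relies on the reserved VMID ... */

	/* Drop the reservation again. */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_VM_OP_UNRESERVE_VMID;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_VM, &args))
		perror("unreserve vmid");

	close(fd);
	return 0;
}

With this patch, several processes issuing the reserve op on the same VM hub share the single id_mgr->reserved ID instead of each pinning their own, which is what removes the AMDGPU_VM_MAX_RESERVED_VMID limit above.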