Commit bcb1ba35 authored by Christian König, committed by Alex Deucher

drm/amdgpu: merge VM manager and VM context ID structure

No need to have two of them any more.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent bd4c72d1
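
For orientation before the hunks: the patch folds the old per-VM `struct amdgpu_vm_id` wrapper into the manager-owned `struct amdgpu_vm_manager_id`, renames the result `struct amdgpu_vm_id`, and leaves each VM with only pointers into the manager's array, one per ring. Below is a condensed sketch of the resulting layout, with fields trimmed to those the diff touches; it is illustrative, not a verbatim copy of the header.

/* Condensed post-patch layout (illustrative; see the amdgpu.h hunks below).
 * One amdgpu_vm_id now exists per hardware VMID in adev->vm_manager.ids[];
 * a VM no longer embeds its own copy but points at the manager's entries. */
struct amdgpu_vm_id {
        struct list_head        list;            /* position in the manager's LRU list */
        struct fence            *active;         /* last submission using this VMID */
        atomic_long_t           owner;           /* (long)&vm->ids[ring_idx], or 0 if unowned */

        uint64_t                pd_gpu_addr;     /* moved in from the old per-VM struct */
        /* last flushed PD/PT update */
        struct fence            *flushed_updates;

        /* GDS/GWS/OA switch state, unchanged by this patch */
        uint32_t                gds_base, gds_size;
        uint32_t                gws_base, gws_size;
        uint32_t                oa_base, oa_size;
};

struct amdgpu_vm {
        /* ... */
        /* for id and flush management per ring: now a pointer into
         * adev->vm_manager.ids[], NULL until an ID has been grabbed */
        struct amdgpu_vm_id     *ids[AMDGPU_MAX_RINGS];
        /* ... */
};

Note that the owner tag now stores the address of the VM's per-ring slot rather than a pointer to a separate per-VM structure, which is why the grab/fini hunks compare against (long)&vm->ids[ring->idx].
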
@@ -839,13 +839,6 @@ struct amdgpu_vm_pt {
 	uint64_t		addr;
 };
 
-struct amdgpu_vm_id {
-	struct amdgpu_vm_manager_id	*mgr_id;
-	uint64_t			pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct fence			*flushed_updates;
-};
-
 struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
 	struct rb_root		va;
@@ -871,7 +864,7 @@ struct amdgpu_vm {
 	struct amdgpu_vm_pt	*page_tables;
 
 	/* for id and flush management per ring */
-	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
 
 	/* protecting freed */
 	spinlock_t		freed_lock;
@@ -880,11 +873,15 @@
 	struct amd_sched_entity	entity;
 };
 
-struct amdgpu_vm_manager_id {
+struct amdgpu_vm_id {
 	struct list_head	list;
 	struct fence		*active;
 	atomic_long_t		owner;
 
+	uint64_t		pd_gpu_addr;
+	/* last flushed PD/PT update */
+	struct fence		*flushed_updates;
+
 	uint32_t		gds_base;
 	uint32_t		gds_size;
 	uint32_t		gws_base;
@@ -898,7 +895,7 @@ struct amdgpu_vm_manager {
 	struct mutex		lock;
 	unsigned		num_ids;
 	struct list_head	ids_lru;
-	struct amdgpu_vm_manager_id	ids[AMDGPU_NUM_VM];
+	struct amdgpu_vm_id	ids[AMDGPU_NUM_VM];
 
 	uint32_t		max_pfn;
 	/* vram base address for page table entry */
@@ -166,43 +166,41 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 {
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
+	struct amdgpu_vm_id *id = vm->ids[ring->idx];
 	struct fence *updates = sync->last_vm_update;
 	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
 	/* check if the id is still valid */
-	if (id->mgr_id) {
+	if (id) {
 		struct fence *flushed = id->flushed_updates;
-		bool is_later;
-		long owner;
+		long owner = atomic_long_read(&id->owner);
+		bool usable = pd_addr == id->pd_gpu_addr;
 
-		if (!flushed)
-			is_later = true;
+		if (owner != (long)&vm->ids[ring->idx])
+			usable = false;
+		else if (!flushed)
+			usable = false;
 		else if (!updates)
-			is_later = false;
+			usable = true;
 		else
-			is_later = fence_is_later(updates, flushed);
+			usable = !fence_is_later(updates, flushed);
 
-		owner = atomic_long_read(&id->mgr_id->owner);
-		if (!is_later && owner == (long)id &&
-		    pd_addr == id->pd_gpu_addr) {
+		if (usable) {
 
-			r = amdgpu_sync_fence(ring->adev, sync,
-					      id->mgr_id->active);
+			r = amdgpu_sync_fence(ring->adev, sync, id->active);
 			if (r) {
 				mutex_unlock(&adev->vm_manager.lock);
 				return r;
 			}
 
-			fence_put(id->mgr_id->active);
-			id->mgr_id->active = fence_get(fence);
+			fence_put(id->active);
+			id->active = fence_get(fence);
 
-			list_move_tail(&id->mgr_id->list,
-				       &adev->vm_manager.ids_lru);
+			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 
-			*vm_id = id->mgr_id - adev->vm_manager.ids;
+			*vm_id = id - adev->vm_manager.ids;
 			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
 			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
 						*vm_pd_addr);
@@ -212,38 +210,41 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		}
 	}
 
-	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
-				      struct amdgpu_vm_manager_id,
-				      list);
-	if (id->mgr_id->active && !fence_is_signaled(id->mgr_id->active)) {
-		struct amdgpu_vm_manager_id *mgr_id, *tmp;
+	id = list_first_entry(&adev->vm_manager.ids_lru,
+			      struct amdgpu_vm_id,
+			      list);
+	if (id->active && !fence_is_signaled(id->active)) {
+		struct amdgpu_vm_id *tmp;
 		struct list_head *head = &adev->vm_manager.ids_lru;
-		list_for_each_entry_safe(mgr_id, tmp, &adev->vm_manager.ids_lru, list) {
-			if (mgr_id->active && fence_is_signaled(mgr_id->active)) {
-				list_move(&mgr_id->list, head);
-				head = &mgr_id->list;
+
+		list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
+					 list) {
+			if (id->active && fence_is_signaled(id->active)) {
+				list_move(&id->list, head);
+				head = &id->list;
 			}
 		}
-		id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
-					      struct amdgpu_vm_manager_id,
-					      list);
+		id = list_first_entry(&adev->vm_manager.ids_lru,
+				      struct amdgpu_vm_id,
+				      list);
 	}
 
-	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
+	r = amdgpu_sync_fence(ring->adev, sync, id->active);
 	if (!r) {
-		fence_put(id->mgr_id->active);
-		id->mgr_id->active = fence_get(fence);
+		fence_put(id->active);
+		id->active = fence_get(fence);
 
 		fence_put(id->flushed_updates);
 		id->flushed_updates = fence_get(updates);
 
 		id->pd_gpu_addr = pd_addr;
 
-		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
-		atomic_long_set(&id->mgr_id->owner, (long)id);
+		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+		atomic_long_set(&id->owner, (long)&vm->ids[ring->idx]);
+		vm->ids[ring->idx] = id;
 
-		*vm_id = id->mgr_id - adev->vm_manager.ids;
+		*vm_id = id - adev->vm_manager.ids;
 		*vm_pd_addr = pd_addr;
 		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 	}
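
The first grab_id hunk replaces the old is_later/owner test with a single usable flag. The following is a minimal restatement of that test using the field names from the diff; the helper name is made up for illustration, the kernel keeps this logic inline in amdgpu_vm_grab_id().

/* Illustrative restatement of the "can we keep our previous VMID?" check. */
static bool amdgpu_vm_id_still_usable(struct amdgpu_vm *vm, unsigned ring_idx,
				      uint64_t pd_addr, struct fence *updates)
{
	struct amdgpu_vm_id *id = vm->ids[ring_idx];

	if (!id)
		return false;		/* never had an ID on this ring */
	if (atomic_long_read(&id->owner) != (long)&vm->ids[ring_idx])
		return false;		/* another VM grabbed this VMID since */
	if (pd_addr != id->pd_gpu_addr)
		return false;		/* page directory address changed */
	if (!id->flushed_updates)
		return false;		/* nothing has ever been flushed */
	if (!updates)
		return true;		/* no pending PD/PT updates to flush */
	return !fence_is_later(updates, id->flushed_updates);
}

If the test fails, the function falls through to the LRU path in the second hunk and grabs (or recycles) a manager-owned ID instead.
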
@@ -268,14 +269,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		      uint32_t oa_base, uint32_t oa_size)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
-		mgr_id->gds_base != gds_base ||
-		mgr_id->gds_size != gds_size ||
-		mgr_id->gws_base != gws_base ||
-		mgr_id->gws_size != gws_size ||
-		mgr_id->oa_base != oa_base ||
-		mgr_id->oa_size != oa_size);
+		id->gds_base != gds_base ||
+		id->gds_size != gds_size ||
+		id->gws_base != gws_base ||
+		id->gws_size != gws_size ||
+		id->oa_base != oa_base ||
+		id->oa_size != oa_size);
 
 	if (ring->funcs->emit_pipeline_sync && (
 	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
@@ -287,12 +288,12 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
 	}
 
 	if (gds_switch_needed) {
-		mgr_id->gds_base = gds_base;
-		mgr_id->gds_size = gds_size;
-		mgr_id->gws_base = gws_base;
-		mgr_id->gws_size = gws_size;
-		mgr_id->oa_base = oa_base;
-		mgr_id->oa_size = oa_size;
+		id->gds_base = gds_base;
+		id->gds_size = gds_size;
+		id->gws_base = gws_base;
+		id->gws_size = gws_size;
+		id->oa_base = oa_base;
+		id->oa_size = oa_size;
 		amdgpu_ring_emit_gds_switch(ring, vm_id,
 					    gds_base, gds_size,
 					    gws_base, gws_size,
@@ -310,14 +311,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
  */
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
 {
-	struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
 
-	mgr_id->gds_base = 0;
-	mgr_id->gds_size = 0;
-	mgr_id->gws_base = 0;
-	mgr_id->gws_size = 0;
-	mgr_id->oa_base = 0;
-	mgr_id->oa_size = 0;
+	id->gds_base = 0;
+	id->gds_size = 0;
+	id->gws_base = 0;
+	id->gws_size = 0;
+	id->oa_base = 0;
+	id->oa_size = 0;
 }
 
 /**
@@ -1345,10 +1346,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	struct amd_sched_rq *rq;
 	int i, r;
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		vm->ids[i].mgr_id = NULL;
-		vm->ids[i].flushed_updates = NULL;
-	}
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		vm->ids[i] = NULL;
 	vm->va = RB_ROOT;
 	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
@@ -1443,12 +1442,12 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	fence_put(vm->page_directory_fence);
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_vm_id *id = &vm->ids[i];
+		struct amdgpu_vm_id *id = vm->ids[i];
 
-		if (id->mgr_id)
-			atomic_long_cmpxchg(&id->mgr_id->owner,
-					    (long)id, 0);
-		fence_put(id->flushed_updates);
+		if (!id)
+			continue;
+
+		atomic_long_cmpxchg(&id->owner, (long)&vm->ids[i], 0);
 	}
 }
@@ -1486,6 +1485,10 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
 	unsigned i;
 
-	for (i = 0; i < AMDGPU_NUM_VM; ++i)
-		fence_put(adev->vm_manager.ids[i].active);
+	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
+		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
+
+		fence_put(id->active);
+		fence_put(id->flushed_updates);
+	}
 }