Commit 39f7f69a authored by Christian König, committed by Alex Deucher

drm/amdgpu: add bo_list iterators

Add helpers to iterate over all entries in a bo_list.
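
The helpers take an entry pointer instead of an array index, so the conversion applied throughout the hunks below follows this pattern:

	/* before: open-coded, index-based walk over the list */
	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	/* after: pointer-based walk using the new iterator */
	amdgpu_bo_list_for_each_entry(e, list)
		amdgpu_bo_unref(&e->robj);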
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a0f20845
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -43,12 +43,12 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 
 static void amdgpu_bo_list_release_rcu(struct kref *ref)
 {
-	unsigned i;
 	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
 					   refcount);
+	struct amdgpu_bo_list_entry *e;
 
-	for (i = 0; i < list->num_entries; ++i)
-		amdgpu_bo_unref(&list->array[i].robj);
+	amdgpu_bo_list_for_each_entry(e, list)
+		amdgpu_bo_unref(&e->robj);
 
 	kvfree(list->array);
 	kfree_rcu(list, rhead);
@@ -103,6 +103,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
 
 	unsigned last_entry = 0, first_userptr = num_entries;
+	struct amdgpu_bo_list_entry *e;
 	uint64_t total_size = 0;
 	unsigned i;
 	int r;
@@ -156,7 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 		trace_amdgpu_bo_list_set(list, entry->robj);
 	}
 
-	for (i = 0; i < list->num_entries; ++i)
+	amdgpu_bo_list_for_each_entry(e, list)
 		amdgpu_bo_unref(&list->array[i].robj);
 
 	kvfree(list->array);
@@ -201,6 +202,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 	 * concatenated in descending order.
 	 */
 	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+	struct amdgpu_bo_list_entry *e;
 	unsigned i;
 
 	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
@@ -211,14 +213,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 	 * in the list, the sort mustn't change the ordering of buffers
 	 * with the same priority, i.e. it must be stable.
	 */
-	for (i = 0; i < list->num_entries; i++) {
-		unsigned priority = list->array[i].priority;
+	amdgpu_bo_list_for_each_entry(e, list) {
+		unsigned priority = e->priority;
 
-		if (!list->array[i].robj->parent)
-			list_add_tail(&list->array[i].tv.head,
-				      &bucket[priority]);
+		if (!e->robj->parent)
+			list_add_tail(&e->tv.head, &bucket[priority]);
 
-		list->array[i].user_pages = NULL;
+		e->user_pages = NULL;
 	}
 
 	/* Connect the sorted buckets in the output list. */
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -65,4 +65,14 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev,
 			  unsigned num_entries,
 			  struct amdgpu_bo_list **list);
 
+#define amdgpu_bo_list_for_each_entry(e, list) \
+	for (e = &(list)->array[0]; \
+	     e != &(list)->array[(list)->num_entries]; \
+	     ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+	for (e = &(list)->array[(list)->first_userptr]; \
+	     e != &(list)->array[(list)->num_entries]; \
+	     ++e)
+
 #endif
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -563,10 +563,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
-	unsigned i, tries = 10;
 	struct amdgpu_bo *gds;
 	struct amdgpu_bo *gws;
 	struct amdgpu_bo *oa;
+	unsigned tries = 10;
 	int r;
 
 	INIT_LIST_HEAD(&p->validated);
@@ -596,7 +596,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	while (1) {
 		struct list_head need_pages;
-		unsigned i;
 
 		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 					   &duplicates);
@@ -611,12 +610,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			break;
 
 		INIT_LIST_HEAD(&need_pages);
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			struct amdgpu_bo *bo;
-
-			e = &p->bo_list->array[i];
-			bo = e->robj;
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+			struct amdgpu_bo *bo = e->robj;
 
 			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
 				 &e->user_invalidated) && e->user_pages) {
@@ -710,16 +705,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	if (p->bo_list) {
 		struct amdgpu_vm *vm = &fpriv->vm;
-		unsigned i;
+		struct amdgpu_bo_list_entry *e;
 
 		gds = p->bo_list->gds_obj;
 		gws = p->bo_list->gws_obj;
 		oa = p->bo_list->oa_obj;
-		for (i = 0; i < p->bo_list->num_entries; i++) {
-			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
 
-			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
-		}
+		amdgpu_bo_list_for_each_entry(e, p->bo_list)
+			e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
 	} else {
 		gds = p->adev->gds.gds_gfx_bo;
 		gws = p->adev->gds.gws_gfx_bo;
@@ -753,10 +746,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 error_free_pages:
 
 	if (p->bo_list) {
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			e = &p->bo_list->array[i];
-
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 			if (!e->user_pages)
 				continue;
@@ -830,7 +820,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	struct amdgpu_bo *bo;
-	int i, r;
+	int r;
 
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
@@ -861,15 +851,17 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 
 	}
 
 	if (p->bo_list) {
-		for (i = 0; i < p->bo_list->num_entries; i++) {
+		struct amdgpu_bo_list_entry *e;
+
+		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 			struct dma_fence *f;
 
 			/* ignore duplicates */
-			bo = p->bo_list->array[i].robj;
+			bo = e->robj;
 			if (!bo)
 				continue;
 
-			bo_va = p->bo_list->array[i].bo_va;
+			bo_va = e->bo_va;
 			if (bo_va == NULL)
 				continue;
@@ -898,14 +890,15 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 
 		return r;
 
 	if (amdgpu_vm_debug && p->bo_list) {
+		struct amdgpu_bo_list_entry *e;
+
 		/* Invalidate all BOs to test for userspace bugs */
-		for (i = 0; i < p->bo_list->num_entries; i++) {
+		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 			/* ignore duplicates */
-			bo = p->bo_list->array[i].robj;
-			if (!bo)
+			if (!e->robj)
 				continue;
 
-			amdgpu_vm_bo_invalidate(adev, bo, false);
+			amdgpu_vm_bo_invalidate(adev, e->robj, false);
 		}
 	}
@@ -1225,16 +1218,16 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
 	enum drm_sched_priority priority;
 	struct amdgpu_job *job;
-	unsigned i;
 	uint64_t seq;
 	int r;
 
 	amdgpu_mn_lock(p->mn);
 	if (p->bo_list) {
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+		struct amdgpu_bo_list_entry *e;
+
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+			struct amdgpu_bo *bo = e->robj;
 
 			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
 				amdgpu_mn_unlock(p->mn);
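
For context, both macros are plain half-open pointer-range loops over the contiguous entry array; the userptr variant merely starts at first_userptr instead of index 0. A minimal stand-alone sketch of the same technique (hypothetical my_list/my_entry names, not amdgpu code):

	#include <stdio.h>

	struct my_entry {
		int id;
	};

	struct my_list {
		struct my_entry *array;	/* contiguous array of entries */
		unsigned num_entries;
		unsigned first_userptr;	/* index where userptr entries begin */
	};

	/* Walk every entry: e ranges over [&array[0], &array[num_entries]). */
	#define my_list_for_each_entry(e, list) \
		for (e = &(list)->array[0]; \
		     e != &(list)->array[(list)->num_entries]; \
		     ++e)

	/* Walk only the trailing userptr entries, starting at first_userptr. */
	#define my_list_for_each_userptr_entry(e, list) \
		for (e = &(list)->array[(list)->first_userptr]; \
		     e != &(list)->array[(list)->num_entries]; \
		     ++e)

	int main(void)
	{
		struct my_entry entries[4] = { {0}, {1}, {2}, {3} };
		struct my_list list = { entries, 4, 2 };
		struct my_entry *e;

		my_list_for_each_entry(e, &list)
			printf("entry %d\n", e->id);

		my_list_for_each_userptr_entry(e, &list)
			printf("userptr entry %d\n", e->id);

		return 0;
	}

The end-of-range comparison against &array[num_entries] relies on C permitting a pointer one past the last array element, so the loops terminate cleanly without a separate counter variable.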