Commit 27674c66 authored by Rob Clark

drm/msm/gem: Split vma lookup and pin

This way we only look up the vma once per object per submit, for both the
submit and retire paths.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20220411215849.297838-9-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 522f1abf
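
As orientation before the diff: a minimal sketch of the call pattern the split enables, using only the functions this patch adds to msm_gem.h. The helper name pin_bo_once() and its shape are hypothetical, and the sketch assumes the caller already holds the object lock, as the _locked suffix requires.

	#include "msm_gem.h"

	/* Hypothetical helper, not part of this patch: resolve the vma once,
	 * then pin it as a separate step. Caller must hold the object lock.
	 */
	static int pin_bo_once(struct drm_gem_object *obj,
			struct msm_gem_address_space *aspace,
			struct msm_gem_vma **vma_out)
	{
		struct msm_gem_vma *vma;
		int ret;

		vma = msm_gem_get_vma_locked(obj, aspace);	/* one lookup */
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_pin_vma_locked(obj, vma);		/* pin separately */
		if (ret)
			return ret;

		/* Cache the vma (the patch stores it in submit->bos[i].vma) so
		 * the retire path can call msm_gem_unpin_vma_locked(obj, vma)
		 * directly, with no second lookup.
		 */
		*vma_out = vma;
		return 0;
	}

This is the shape submit_pin_objects() takes in the diff below; submit_cleanup_bo() then unpins through the cached pointer.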
drivers/gpu/drm/msm/msm_gem.c
@@ -407,7 +407,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
 	return vma;
 }
 
-static int msm_gem_pin_iova(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **pages;
@@ -439,6 +439,26 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 	return ret;
 }
 
+void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	GEM_WARN_ON(!msm_gem_is_locked(obj));
+
+	msm_gem_unmap_vma(vma->aspace, vma);
+
+	msm_obj->pin_count--;
+	GEM_WARN_ON(msm_obj->pin_count < 0);
+
+	update_inactive(msm_obj);
+}
+
+struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	return get_vma_locked(obj, aspace, 0, U64_MAX);
+}
+
 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova,
 		u64 range_start, u64 range_end)
@@ -452,7 +472,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	ret = msm_gem_pin_iova(obj, vma);
+	ret = msm_gem_pin_vma_locked(obj, vma);
 	if (!ret)
 		*iova = vma->iova;
 
@@ -476,12 +496,6 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 	return ret;
 }
 
-int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova)
-{
-	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
-}
-
 /* get iova and pin it. Should have a matching put */
 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
@@ -511,29 +525,6 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 	return ret;
 }
 
-/*
- * Locked variant of msm_gem_unpin_iova()
- */
-void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct msm_gem_vma *vma;
-
-	GEM_WARN_ON(!msm_gem_is_locked(obj));
-
-	vma = lookup_vma(obj, aspace);
-
-	if (!GEM_WARN_ON(!vma)) {
-		msm_gem_unmap_vma(aspace, vma);
-
-		msm_obj->pin_count--;
-		GEM_WARN_ON(msm_obj->pin_count < 0);
-
-		update_inactive(msm_obj);
-	}
-}
-
 /*
  * Unpin a iova by updating the reference counts. The memory isn't actually
  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
@@ -542,8 +533,13 @@ void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
+	struct msm_gem_vma *vma;
+
 	msm_gem_lock(obj);
-	msm_gem_unpin_iova_locked(obj, aspace);
+	vma = lookup_vma(obj, aspace);
+	if (!GEM_WARN_ON(!vma)) {
+		msm_gem_unpin_vma_locked(obj, vma);
+	}
 	msm_gem_unlock(obj);
 }
 
drivers/gpu/drm/msm/msm_gem.h
@@ -133,17 +133,17 @@ struct msm_gem_object {
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova,
 		u64 range_start, u64 range_end);
-int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace, uint64_t *iova);
 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
-void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace);
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
@@ -369,6 +369,7 @@ struct msm_gem_submit {
 			uint32_t handle;
 		};
 		uint64_t iova;
+		struct msm_gem_vma *vma;
 	} bos[];
 };
drivers/gpu/drm/msm/msm_gem_submit.c
@@ -232,7 +232,7 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
 	unsigned flags = submit->bos[i].flags & cleanup_flags;
 
 	if (flags & BO_PINNED)
-		msm_gem_unpin_iova_locked(obj, submit->aspace);
+		msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
 
 	if (flags & BO_ACTIVE)
 		msm_gem_active_put(obj);
@@ -365,21 +365,26 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
-		uint64_t iova;
+		struct msm_gem_vma *vma;
 
 		/* if locking succeeded, pin bo: */
-		ret = msm_gem_get_and_pin_iova_locked(obj,
-				submit->aspace, &iova);
+		vma = msm_gem_get_vma_locked(obj, submit->aspace);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			break;
+		}
 
+		ret = msm_gem_pin_vma_locked(obj, vma);
 		if (ret)
 			break;
 
 		submit->bos[i].flags |= BO_PINNED;
+		submit->bos[i].vma = vma;
 
-		if (iova == submit->bos[i].iova) {
+		if (vma->iova == submit->bos[i].iova) {
 			submit->bos[i].flags |= BO_VALID;
 		} else {
-			submit->bos[i].iova = iova;
+			submit->bos[i].iova = vma->iova;
 			/* iova changed, so address in cmdstream is not valid: */
 			submit->bos[i].flags &= ~BO_VALID;
 			submit->valid = false;