Commit 7ad0e8cf authored by Jordan Crouse, committed by Rob Clark

drm/msm: Count how many times iova memory is pinned

Add a reference count to track how many times a particular
chunk of iova memory is pinned (mapped) in the IOMMU, and
add msm_gem_unpin_iova to give up references.

It is important to note that msm_gem_unpin_iova replaces
msm_gem_put_iova because of the new implicit behavior: an
iova assigned to a given vma is now valid for the life of
the buffer, so what we are really tracking is the use of
that iova, not its existence.

For now the unmappings are lazy: once the reference count
drops to zero the region *could* be unmapped dynamically,
but that will require an outside force such as a shrinker
or mm_notifiers. For the moment we focus on getting the
counting right and setting ourselves up for the future.
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 9fe041f6
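
To summarize the new contract before the diff, here is a minimal usage sketch (a hypothetical caller, not part of the patch; bo and gpu are assumed to exist). Each successful msm_gem_get_and_pin_iova() takes one pin reference on the buffer's vma in that address space and must be balanced by one msm_gem_unpin_iova(); the iova value itself remains valid for the life of the buffer.

	uint64_t iova;
	int ret;

	/* Pin the buffer in the GPU address space (hypothetical caller) */
	ret = msm_gem_get_and_pin_iova(bo, gpu->aspace, &iova);
	if (!ret) {
		/* ... hand iova to the hardware ... */

		/* Drop the pin; the actual unmap happens lazily, later */
		msm_gem_unpin_iova(bo, gpu->aspace);
	}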
@@ -130,15 +130,13 @@ reset_set(void *data, u64 val)
 	adreno_gpu->fw[ADRENO_FW_PFP] = NULL;

 	if (a5xx_gpu->pm4_bo) {
-		if (a5xx_gpu->pm4_iova)
-			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
 		drm_gem_object_put(a5xx_gpu->pm4_bo);
 		a5xx_gpu->pm4_bo = NULL;
 	}

 	if (a5xx_gpu->pfp_bo) {
-		if (a5xx_gpu->pfp_iova)
-			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
 		drm_gem_object_put(a5xx_gpu->pfp_bo);
 		a5xx_gpu->pfp_bo = NULL;
 	}
......
@@ -840,20 +840,17 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 	a5xx_preempt_fini(gpu);

 	if (a5xx_gpu->pm4_bo) {
-		if (a5xx_gpu->pm4_iova)
-			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
 		drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
 	}

 	if (a5xx_gpu->pfp_bo) {
-		if (a5xx_gpu->pfp_iova)
-			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
 		drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
 	}

 	if (a5xx_gpu->gpmu_bo) {
-		if (a5xx_gpu->gpmu_iova)
-			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+		msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
 		drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
 	}
......
@@ -736,8 +736,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

 	if (a6xx_gpu->sqe_bo) {
-		if (a6xx_gpu->sqe_iova)
-			msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+		msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
 		drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
 	}
......
@@ -152,7 +152,7 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
 static void a6xx_crashdumper_free(struct msm_gpu *gpu,
 		struct a6xx_crashdumper *dumper)
 {
-	msm_gem_put_iova(dumper->bo, gpu->aspace);
+	msm_gem_unpin_iova(dumper->bo, gpu->aspace);
 	msm_gem_put_vaddr(dumper->bo);

 	drm_gem_object_unreference(dumper->bo);
......
@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
 	struct msm_kms *kms = &mdp4_kms->base.base;

-	msm_gem_put_iova(val, kms->aspace);
+	msm_gem_unpin_iova(val, kms->aspace);
 	drm_gem_object_put_unlocked(val);
 }
......
@@ -165,7 +165,7 @@ static void mdp4_destroy(struct msm_kms *kms)
 	struct msm_gem_address_space *aspace = kms->aspace;

 	if (mdp4_kms->blank_cursor_iova)
-		msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+		msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
 	drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);

 	if (aspace) {
......
@@ -173,7 +173,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
 	struct msm_kms *kms = &mdp5_kms->base.base;

-	msm_gem_put_iova(val, kms->aspace);
+	msm_gem_unpin_iova(val, kms->aspace);
 	drm_gem_object_put_unlocked(val);
 }
......
@@ -1118,7 +1118,7 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
 	priv = dev->dev_private;

 	if (msm_host->tx_gem_obj) {
-		msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+		msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
 		drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
 		msm_host->tx_gem_obj = NULL;
 	}
......
@@ -243,10 +243,14 @@ void msm_atomic_state_free(struct drm_atomic_state *state);
 int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, int npages);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma);

 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
@@ -275,10 +279,10 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj,
-		struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
......
@@ -81,7 +81,7 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
 	int i, n = fb->format->num_planes;

 	for (i = 0; i < n; i++)
-		msm_gem_put_iova(fb->obj[i], aspace);
+		msm_gem_unpin_iova(fb->obj[i], aspace);
 }

 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
......
@@ -352,7 +352,8 @@ put_iova(struct drm_gem_object *obj)
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));

 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
-		msm_gem_unmap_vma(vma->aspace, vma);
+		msm_gem_purge_vma(vma->aspace, vma);
+		msm_gem_close_vma(vma->aspace, vma);
 		del_vma(vma);
 	}
 }
@@ -430,7 +431,10 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 	return ret;
 }

-/* Get an iova but don't pin the memory behind it */
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
@@ -444,7 +448,6 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 	return ret;
 }

-
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
  */
@@ -462,15 +465,24 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 	return vma ? vma->iova : 0;
 }

-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin a iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	// XXX TODO ..
-	// NOTE: probably don't need a _locked() version.. we wouldn't
-	// normally unmap here, but instead just mark that it could be
-	// unmapped (if the iova refcnt drops to zero), but then later
-	// if another _get_iova_locked() fails we can start unmapping
-	// things that are no longer needed..
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+
+	mutex_lock(&msm_obj->lock);
+	vma = lookup_vma(obj, aspace);
+
+	if (!WARN_ON(!vma))
+		msm_gem_unmap_vma(aspace, vma);
+
+	mutex_unlock(&msm_obj->lock);
 }

 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
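
To make the counting concrete, a short sketch (a hypothetical snippet, not from the patch; obj and aspace are assumed to exist): pins nest, so every msm_gem_get_and_pin_iova() must be matched by an unpin before a purge becomes legal.

	uint64_t iova;

	msm_gem_get_and_pin_iova(obj, aspace, &iova);	/* vma->inuse = 1, pages mapped */
	msm_gem_get_and_pin_iova(obj, aspace, &iova);	/* vma->inuse = 2, already mapped */

	msm_gem_unpin_iova(obj, aspace);	/* vma->inuse = 1 */
	msm_gem_unpin_iova(obj, aspace);	/* vma->inuse = 0, still mapped (lazy) */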
@@ -789,8 +801,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		seq_puts(m, " vmas:");

 		list_for_each_entry(vma, &msm_obj->vmas, list)
-			seq_printf(m, " [%s: %08llx,%s]", vma->aspace->name,
-				vma->iova, vma->mapped ? "mapped" : "unmapped");
+			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+				vma->iova, vma->mapped ? "mapped" : "unmapped",
+				vma->inuse);

 		seq_puts(m, "\n");
 	}
@@ -1093,7 +1106,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
 		vaddr = msm_gem_get_vaddr(obj);
 		if (IS_ERR(vaddr)) {
-			msm_gem_put_iova(obj, aspace);
+			msm_gem_unpin_iova(obj, aspace);
 			ret = PTR_ERR(vaddr);
 			goto err;
 		}
@@ -1133,7 +1146,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
 		return;

 	msm_gem_put_vaddr(bo);
-	msm_gem_put_iova(bo, aspace);
+	msm_gem_unpin_iova(bo, aspace);

 	if (locked)
 		drm_gem_object_put(bo);
......
@@ -42,6 +42,7 @@ struct msm_gem_vma {
 	struct msm_gem_address_space *aspace;
 	struct list_head list;	/* node in msm_gem_object::vmas */
 	bool mapped;
+	int inuse;
 };

 struct msm_gem_object {
......
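
A note on the inuse field added above: inuse counts active pins, while mapped tracks whether the pages are actually in the IOMMU. A vma may therefore stay mapped with inuse == 0 (lazy retention), but it should never be in use without being mapped once msm_gem_map_vma() has succeeded. A hypothetical helper expressing that invariant (not part of the patch):

	/* Hypothetical invariant check, assuming msm_gem_map_vma() succeeded */
	static bool msm_gem_vma_sane(const struct msm_gem_vma *vma)
	{
		return !(vma->inuse > 0 && !vma->mapped);
	}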
@@ -167,7 +167,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
 	struct msm_gem_object *msm_obj = submit->bos[i].obj;

 	if (submit->bos[i].flags & BO_PINNED)
-		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+		msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);

 	if (submit->bos[i].flags & BO_LOCKED)
 		ww_mutex_unlock(&msm_obj->resv->lock);
......
@@ -38,26 +38,32 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
 		kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }

-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma)
 {
-	if (!aspace || !vma->iova)
+	unsigned size = vma->node.size << PAGE_SHIFT;
+
+	/* Print a message if we try to purge a vma in use */
+	if (WARN_ON(vma->inuse > 0))
 		return;

-	if (aspace->mmu) {
-		unsigned size = vma->node.size << PAGE_SHIFT;
-		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
-	}
+	/* Don't do anything if the memory isn't mapped */
+	if (!vma->mapped)
+		return;

-	spin_lock(&aspace->lock);
-	drm_mm_remove_node(&vma->node);
-	spin_unlock(&aspace->lock);
+	if (aspace->mmu)
+		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

-	vma->iova = 0;
+	vma->mapped = false;
+}

-	msm_gem_address_space_put(aspace);
+/* Remove reference counts for the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	if (!WARN_ON(!vma->iova))
+		vma->inuse--;
 }

 int
@@ -70,6 +76,9 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 	if (WARN_ON(!vma->iova))
 		return -EINVAL;

+	/* Increase the usage counter */
+	vma->inuse++;
+
 	if (vma->mapped)
 		return 0;
@@ -85,6 +94,23 @@
 	return ret;
 }

+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma)
+{
+	if (WARN_ON(vma->inuse > 0 || vma->mapped))
+		return;
+
+	spin_lock(&aspace->lock);
+	if (vma->iova)
+		drm_mm_remove_node(&vma->node);
+	spin_unlock(&aspace->lock);
+
+	vma->iova = 0;
+	msm_gem_address_space_put(aspace);
+}
+
+/* Initialize a new vma and allocate an iova for it */
 int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, int npages)
@@ -109,6 +135,7 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
 	return 0;
 }

 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name)
......
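
Taken together, the msm_gem_vma.c changes above give each vma a small lifecycle; an editorial summary in the order the functions are meant to be called (the teardown lines mirror put_iova() in msm_gem.c above):

	/*
	 * msm_gem_init_vma()  - allocate the iova, take an aspace reference
	 * msm_gem_map_vma()   - inuse++; map pages into the IOMMU on first use
	 * msm_gem_unmap_vma() - inuse--; the mapping is deliberately left in place
	 * msm_gem_purge_vma() - WARNs if inuse > 0, otherwise actually unmaps
	 * msm_gem_close_vma() - WARNs if still pinned or mapped, then releases
	 *                       the iova and drops the aspace reference
	 */

	/* Teardown order used by put_iova(): */
	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);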
@@ -689,7 +689,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
 		/* move to inactive: */
 		msm_gem_move_to_inactive(&msm_obj->base);
-		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
+		msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
 		drm_gem_object_put(&msm_obj->base);
 	}
......