Commit bdefca2d authored by Boris Brezillon, committed by Rob Herring

drm/panfrost: Add the panfrost_gem_mapping concept

With the introduction of per-FD address spaces, the same BO can be mapped
in different address spaces if the BO is globally visible (GEM_FLINK)
and opened in different contexts, or if the dmabuf is self-imported. The
current implementation does not take this case into account and attaches
the mapping directly to the panfrost_gem_object.

Let's create a panfrost_gem_mapping struct and allow multiple mappings
per BO.
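
Concretely, each BO now carries a list of small refcounted mapping
objects, one per address space the BO is mapped in. A simplified excerpt
of the structure this patch introduces (see the panfrost_gem.h hunk
below for the full definition):

    struct panfrost_gem_mapping {
    	struct list_head node;            /* entry in bo->mappings.list */
    	struct kref refcount;             /* dropped by panfrost_gem_mapping_put() */
    	struct panfrost_gem_object *obj;  /* BO backing this mapping */
    	struct drm_mm_node mmnode;        /* GPU VA range in the per-FD drm_mm */
    	struct panfrost_mmu *mmu;         /* address space the BO is mapped in */
    	bool active :1;                   /* currently mapped in the page tables */
    };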

The mappings are refcounted which helps solve another problem where
mappings were torn down (GEM handle closed by userspace) while GPU
jobs accessing those BOs were still in-flight. Jobs now keep a
reference on the mappings they use.
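
As a minimal sketch of the lookup/refcount pattern (mirroring the
panfrost_gem_mapping_get()/panfrost_gem_mapping_put() helpers added in
the diff below; error handling trimmed):

    /*
     * Look up the mapping of 'bo' in the caller's address space and take
     * a reference so it survives a GEM handle close.
     */
    struct panfrost_gem_mapping *mapping = panfrost_gem_mapping_get(bo, priv);

    if (!mapping)
    	return -EINVAL; /* BO was never mapped in this address space */

    /*
     * ... use mapping->mmnode.start << PAGE_SHIFT as the GPU VA and keep
     * the reference while the job is in flight ...
     */

    panfrost_gem_mapping_put(mapping); /* last put tears down the GPU VA */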

v2 (robh):
- Minor review comment clean-ups from Steven
- Use list_is_singular helper
- Just WARN if we add a mapping when madvise state is not WILLNEED.
  With that, drop the use of object_name_lock.

v3 (robh):
- Revert returning list iterator in panfrost_gem_mapping_get()

Fixes: a5efb4c9 ("drm/panfrost: Restructure the GEM object creation")
Fixes: 7282f764 ("drm/panfrost: Implement per FD address spaces")
Cc: <stable@vger.kernel.org>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Acked-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200116021554.15090-1-robh@kernel.org
parent db1a0795
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
+	struct panfrost_file_priv *priv = file->driver_priv;
 	struct panfrost_gem_object *bo;
 	struct drm_panfrost_create_bo *args = data;
+	struct panfrost_gem_mapping *mapping;
 
 	if (!args->size || args->pad ||
 	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	args->offset = bo->node.start << PAGE_SHIFT;
+	mapping = panfrost_gem_mapping_get(bo, priv);
+	if (!mapping) {
+		drm_gem_object_put_unlocked(&bo->base.base);
+		return -EINVAL;
+	}
+
+	args->offset = mapping->mmnode.start << PAGE_SHIFT;
+	panfrost_gem_mapping_put(mapping);
 
 	return 0;
 }
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
 		  struct drm_panfrost_submit *args,
 		  struct panfrost_job *job)
 {
+	struct panfrost_file_priv *priv = file_priv->driver_priv;
+	struct panfrost_gem_object *bo;
+	unsigned int i;
+	int ret;
+
 	job->bo_count = args->bo_handle_count;
 
 	if (!job->bo_count)
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
 	if (!job->implicit_fences)
 		return -ENOMEM;
 
-	return drm_gem_objects_lookup(file_priv,
-				      (void __user *)(uintptr_t)args->bo_handles,
-				      job->bo_count, &job->bos);
+	ret = drm_gem_objects_lookup(file_priv,
+				     (void __user *)(uintptr_t)args->bo_handles,
+				     job->bo_count, &job->bos);
+	if (ret)
+		return ret;
+
+	job->mappings = kvmalloc_array(job->bo_count,
+				       sizeof(struct panfrost_gem_mapping *),
+				       GFP_KERNEL | __GFP_ZERO);
+	if (!job->mappings)
+		return -ENOMEM;
+
+	for (i = 0; i < job->bo_count; i++) {
+		struct panfrost_gem_mapping *mapping;
+
+		bo = to_panfrost_bo(job->bos[i]);
+		mapping = panfrost_gem_mapping_get(bo, priv);
+		if (!mapping) {
+			ret = -EINVAL;
+			break;
+		}
+
+		job->mappings[i] = mapping;
+	}
+
+	return ret;
 }
 
 /**
@@ -320,7 +357,9 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
 static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv)
 {
+	struct panfrost_file_priv *priv = file_priv->driver_priv;
 	struct drm_panfrost_get_bo_offset *args = data;
+	struct panfrost_gem_mapping *mapping;
 	struct drm_gem_object *gem_obj;
 	struct panfrost_gem_object *bo;
@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
 	}
 
 	bo = to_panfrost_bo(gem_obj);
-	args->offset = bo->node.start << PAGE_SHIFT;
+	mapping = panfrost_gem_mapping_get(bo, priv);
 	drm_gem_object_put_unlocked(gem_obj);
+	if (!mapping)
+		return -EINVAL;
+
+	args->offset = mapping->mmnode.start << PAGE_SHIFT;
+	panfrost_gem_mapping_put(mapping);
 	return 0;
 }
 
 static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv)
 {
+	struct panfrost_file_priv *priv = file_priv->driver_priv;
 	struct drm_panfrost_madvise *args = data;
 	struct panfrost_device *pfdev = dev->dev_private;
 	struct drm_gem_object *gem_obj;
+	struct panfrost_gem_object *bo;
+	int ret = 0;
 
 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {
@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 
+	bo = to_panfrost_bo(gem_obj);
+
 	mutex_lock(&pfdev->shrinker_lock);
+	mutex_lock(&bo->mappings.lock);
+
+	if (args->madv == PANFROST_MADV_DONTNEED) {
+		struct panfrost_gem_mapping *first;
+
+		first = list_first_entry(&bo->mappings.list,
+					 struct panfrost_gem_mapping,
+					 node);
+
+		/*
+		 * If we want to mark the BO purgeable, there must be only one
+		 * user: the caller FD.
+		 * We could do something smarter and mark the BO purgeable only
+		 * when all its users have marked it purgeable, but globally
+		 * visible/shared BOs are likely to never be marked purgeable
+		 * anyway, so let's not bother.
+		 */
+		if (!list_is_singular(&bo->mappings.list) ||
+		    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+			ret = -EINVAL;
+			goto out_unlock_mappings;
+		}
+	}
+
 	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
 
 	if (args->retained) {
-		struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
 		if (args->madv == PANFROST_MADV_DONTNEED)
 			list_add_tail(&bo->base.madv_list,
 				      &pfdev->shrinker_list);
 		else if (args->madv == PANFROST_MADV_WILLNEED)
 			list_del_init(&bo->base.madv_list);
 	}
 
+out_unlock_mappings:
+	mutex_unlock(&bo->mappings.lock);
 	mutex_unlock(&pfdev->shrinker_lock);
 
 	drm_gem_object_put_unlocked(gem_obj);
-	return 0;
+	return ret;
 }
 
 int panfrost_unstable_ioctl_check(void)
...
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	list_del_init(&bo->base.madv_list);
 	mutex_unlock(&pfdev->shrinker_lock);
 
+	/*
+	 * If we still have mappings attached to the BO, there's a problem in
+	 * our refcounting.
+	 */
+	WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
 	if (bo->sgts) {
 		int i;
 		int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	drm_gem_shmem_free_object(obj);
 }
 
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+			 struct panfrost_file_priv *priv)
+{
+	struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+	mutex_lock(&bo->mappings.lock);
+	list_for_each_entry(iter, &bo->mappings.list, node) {
+		if (iter->mmu == &priv->mmu) {
+			kref_get(&iter->refcount);
+			mapping = iter;
+			break;
+		}
+	}
+	mutex_unlock(&bo->mappings.lock);
+
+	return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+	struct panfrost_file_priv *priv;
+
+	if (mapping->active)
+		panfrost_mmu_unmap(mapping);
+
+	priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+	spin_lock(&priv->mm_lock);
+	if (drm_mm_node_allocated(&mapping->mmnode))
+		drm_mm_remove_node(&mapping->mmnode);
+	spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+	struct panfrost_gem_mapping *mapping;
+
+	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+	panfrost_gem_teardown_mapping(mapping);
+	drm_gem_object_put_unlocked(&mapping->obj->base.base);
+	kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+	if (!mapping)
+		return;
+
+	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+	struct panfrost_gem_mapping *mapping;
+
+	mutex_lock(&bo->mappings.lock);
+	list_for_each_entry(mapping, &bo->mappings.list, node)
+		panfrost_gem_teardown_mapping(mapping);
+	mutex_unlock(&bo->mappings.lock);
+}
+
 int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
 	int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
 	struct panfrost_file_priv *priv = file_priv->driver_priv;
+	struct panfrost_gem_mapping *mapping;
+
+	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mapping->node);
+	kref_init(&mapping->refcount);
+	drm_gem_object_get(obj);
+	mapping->obj = bo;
 
 	/*
 	 * Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 	else
 		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
-	bo->mmu = &priv->mmu;
+	mapping->mmu = &priv->mmu;
 	spin_lock(&priv->mm_lock);
-	ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+	ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
 					 size >> PAGE_SHIFT, align, color, 0);
 	spin_unlock(&priv->mm_lock);
 	if (ret)
-		return ret;
+		goto err;
 
 	if (!bo->is_heap) {
-		ret = panfrost_mmu_map(bo);
-		if (ret) {
-			spin_lock(&priv->mm_lock);
-			drm_mm_remove_node(&bo->node);
-			spin_unlock(&priv->mm_lock);
-		}
+		ret = panfrost_mmu_map(mapping);
+		if (ret)
+			goto err;
 	}
+
+	mutex_lock(&bo->mappings.lock);
+	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+	list_add_tail(&mapping->node, &bo->mappings.list);
+	mutex_unlock(&bo->mappings.lock);
+
+err:
+	if (ret)
+		panfrost_gem_mapping_put(mapping);
 	return ret;
 }
 
 void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	struct panfrost_file_priv *priv = file_priv->driver_priv;
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+	struct panfrost_gem_mapping *mapping = NULL, *iter;
 
-	if (bo->is_mapped)
-		panfrost_mmu_unmap(bo);
+	mutex_lock(&bo->mappings.lock);
+	list_for_each_entry(iter, &bo->mappings.list, node) {
+		if (iter->mmu == &priv->mmu) {
+			mapping = iter;
+			list_del(&iter->node);
+			break;
		}
+	}
+	mutex_unlock(&bo->mappings.lock);
 
-	spin_lock(&priv->mm_lock);
-	if (drm_mm_node_allocated(&bo->node))
-		drm_mm_remove_node(&bo->node);
-	spin_unlock(&priv->mm_lock);
+	panfrost_gem_mapping_put(mapping);
 }
 
 static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
 	if (!obj)
 		return NULL;
 
+	INIT_LIST_HEAD(&obj->mappings.list);
+	mutex_init(&obj->mappings.lock);
 	obj->base.base.funcs = &panfrost_gem_funcs;
 
 	return &obj->base.base;
...
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
 	struct drm_gem_shmem_object base;
 	struct sg_table *sgts;
 
-	struct panfrost_mmu *mmu;
-	struct drm_mm_node node;
-	bool is_mapped		:1;
+	/*
+	 * Use a list for now. If searching a mapping ever becomes the
+	 * bottleneck, we should consider using an RB-tree, or even better,
+	 * let the core store drm_gem_object_mapping entries (where we
+	 * could place driver specific data) instead of drm_gem_object ones
+	 * in its drm_file->object_idr table.
+	 *
+	 * struct drm_gem_object_mapping {
+	 *	struct drm_gem_object *obj;
+	 *	void *driver_priv;
+	 * };
+	 */
+	struct {
+		struct list_head list;
+		struct mutex lock;
+	} mappings;
+
 	bool noexec		:1;
 	bool is_heap		:1;
 };
 
+struct panfrost_gem_mapping {
+	struct list_head node;
+	struct kref refcount;
+	struct panfrost_gem_object *obj;
+	struct drm_mm_node mmnode;
+	struct panfrost_mmu *mmu;
+	bool active		:1;
+};
+
 static inline
 struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 {
 	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
 {
-	return container_of(node, struct panfrost_gem_object, node);
+	return container_of(node, struct panfrost_gem_mapping, mmnode);
 }
 
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void panfrost_gem_close(struct drm_gem_object *obj,
 			struct drm_file *file_priv);
 
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+			 struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
 void panfrost_gem_shrinker_init(struct drm_device *dev);
 void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
...
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
 static bool panfrost_gem_purge(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 
 	if (!mutex_trylock(&shmem->pages_lock))
 		return false;
 
-	panfrost_mmu_unmap(to_panfrost_bo(obj));
+	panfrost_gem_teardown_mappings(bo);
 	drm_gem_shmem_purge_locked(obj);
 
 	mutex_unlock(&shmem->pages_lock);
...
@@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref)
 	dma_fence_put(job->done_fence);
 	dma_fence_put(job->render_done_fence);
 
+	if (job->mappings) {
+		for (i = 0; i < job->bo_count; i++)
+			panfrost_gem_mapping_put(job->mappings[i]);
+		kvfree(job->mappings);
+	}
+
 	if (job->bos) {
-		for (i = 0; i < job->bo_count; i++)
+		struct panfrost_gem_object *bo;
+
+		for (i = 0; i < job->bo_count; i++) {
+			bo = to_panfrost_bo(job->bos[i]);
 			drm_gem_object_put_unlocked(job->bos[i]);
+		}
+
 		kvfree(job->bos);
 	}
...
@@ -32,6 +32,7 @@ struct panfrost_job {
 
 	/* Exclusive fences we have taken from the BOs to wait for */
 	struct dma_fence **implicit_fences;
+	struct panfrost_gem_mapping **mappings;
 	struct drm_gem_object **bos;
 	u32 bo_count;
...
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 	return 0;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 {
+	struct panfrost_gem_object *bo = mapping->obj;
 	struct drm_gem_object *obj = &bo->base.base;
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
 	struct sg_table *sgt;
 	int prot = IOMMU_READ | IOMMU_WRITE;
 
-	if (WARN_ON(bo->is_mapped))
+	if (WARN_ON(mapping->active))
 		return 0;
 
 	if (bo->noexec)
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 	if (WARN_ON(IS_ERR(sgt)))
 		return PTR_ERR(sgt);
 
-	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
-	bo->is_mapped = true;
+	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+		   prot, sgt);
+	mapping->active = true;
 
 	return 0;
 }
 
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
 {
+	struct panfrost_gem_object *bo = mapping->obj;
 	struct drm_gem_object *obj = &bo->base.base;
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-	struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
-	u64 iova = bo->node.start << PAGE_SHIFT;
-	size_t len = bo->node.size << PAGE_SHIFT;
+	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+	size_t len = mapping->mmnode.size << PAGE_SHIFT;
 	size_t unmapped_len = 0;
 
-	if (WARN_ON(!bo->is_mapped))
+	if (WARN_ON(!mapping->active))
 		return;
 
-	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+		mapping->mmu->as, iova, len);
 
 	while (unmapped_len < len) {
 		size_t unmapped_page;
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
 		unmapped_len += pgsize;
 	}
 
-	panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
-	bo->is_mapped = false;
+	panfrost_mmu_flush_range(pfdev, mapping->mmu,
+				 mapping->mmnode.start << PAGE_SHIFT, len);
+	mapping->active = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
 	free_io_pgtable_ops(mmu->pgtbl_ops);
 }
 
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
 {
-	struct panfrost_gem_object *bo = NULL;
+	struct panfrost_gem_mapping *mapping = NULL;
 	struct panfrost_file_priv *priv;
 	struct drm_mm_node *node;
 	u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +423,9 @@ addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
 	drm_mm_for_each_node(node, &priv->mm) {
 		if (offset >= node->start &&
 		    offset < (node->start + node->size)) {
-			bo = drm_mm_node_to_panfrost_bo(node);
-			drm_gem_object_get(&bo->base.base);
+			mapping = drm_mm_node_to_panfrost_mapping(node);
+
+			kref_get(&mapping->refcount);
 			break;
 		}
 	}
@@ -427,7 +433,7 @@ addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
 	spin_unlock(&priv->mm_lock);
 out:
 	spin_unlock(&pfdev->as_lock);
-	return bo;
+	return mapping;
 }
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 		       u64 addr)
 {
 	int ret, i;
+	struct panfrost_gem_mapping *bomapping;
 	struct panfrost_gem_object *bo;
 	struct address_space *mapping;
 	pgoff_t page_offset;
 	struct sg_table *sgt;
 	struct page **pages;
 
-	bo = addr_to_drm_mm_node(pfdev, as, addr);
-	if (!bo)
+	bomapping = addr_to_mapping(pfdev, as, addr);
+	if (!bomapping)
 		return -ENOENT;
 
+	bo = bomapping->obj;
 	if (!bo->is_heap) {
 		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
-			 bo->node.start << PAGE_SHIFT);
+			 bomapping->mmnode.start << PAGE_SHIFT);
 		ret = -EINVAL;
 		goto err_bo;
 	}
-	WARN_ON(bo->mmu->as != as);
+	WARN_ON(bomapping->mmu->as != as);
 
 	/* Assume 2MB alignment and size multiple */
 	addr &= ~((u64)SZ_2M - 1);
 	page_offset = addr >> PAGE_SHIFT;
-	page_offset -= bo->node.start;
+	page_offset -= bomapping->mmnode.start;
 
 	mutex_lock(&bo->base.pages_lock);
@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 		goto err_map;
 	}
 
-	mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+	mmu_map_sg(pfdev, bomapping->mmu, addr,
+		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
 
-	bo->is_mapped = true;
+	bomapping->active = true;
 
 	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
-	drm_gem_object_put_unlocked(&bo->base.base);
+	panfrost_gem_mapping_put(bomapping);
 
 	return 0;
...
@@ -4,12 +4,12 @@
 #ifndef __PANFROST_MMU_H__
 #define __PANFROST_MMU_H__
 
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
 struct panfrost_file_priv;
 struct panfrost_mmu;
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
 
 int panfrost_mmu_init(struct panfrost_device *pfdev);
 void panfrost_mmu_fini(struct panfrost_device *pfdev);
...
@@ -25,7 +25,7 @@
 #define V4_SHADERS_PER_COREGROUP	4
 
 struct panfrost_perfcnt {
-	struct panfrost_gem_object *bo;
+	struct panfrost_gem_mapping *mapping;
 	size_t bosize;
 	void *buf;
 	struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
 	int ret;
 
 	reinit_completion(&pfdev->perfcnt->dump_comp);
-	gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+	gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
 	gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
 	gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
 	gpu_write(pfdev, GPU_INT_CLEAR,
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	perfcnt->bo = to_panfrost_bo(&bo->base);
-
 	/* Map the perfcnt buf in the address space attached to file_priv. */
-	ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+	ret = panfrost_gem_open(&bo->base, file_priv);
 	if (ret)
 		goto err_put_bo;
 
+	perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+						    user);
+	if (!perfcnt->mapping) {
+		ret = -EINVAL;
+		goto err_close_bo;
+	}
+
 	perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
 	if (IS_ERR(perfcnt->buf)) {
 		ret = PTR_ERR(perfcnt->buf);
-		goto err_close_bo;
+		goto err_put_mapping;
 	}
 
 	/*
@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
 	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
 		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
 
+	/* The BO ref is retained by the mapping. */
+	drm_gem_object_put_unlocked(&bo->base);
+
 	return 0;
 
 err_vunmap:
-	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+	drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+	panfrost_gem_mapping_put(perfcnt->mapping);
 err_close_bo:
-	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+	panfrost_gem_close(&bo->base, file_priv);
 err_put_bo:
 	drm_gem_object_put_unlocked(&bo->base);
 	return ret;
@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
 		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
 
 	perfcnt->user = NULL;
-	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+	drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
 	perfcnt->buf = NULL;
-	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
-	drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
-	perfcnt->bo = NULL;
+	panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+	panfrost_gem_mapping_put(perfcnt->mapping);
+	perfcnt->mapping = NULL;
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
...