Commit 8223286d authored by Jordan Crouse, committed by Rob Clark

drm/msm: Add a helper function for in-kernel buffer allocations

Nearly all of the buffer allocations for the kernel allocate a buffer object,
virtual address and GPU iova at the same time. Make a helper function to
handle the details.
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
[dropped msm_fbdev conversion to new helper, since it interferes with
display-handover work, where we want to separate allocation and mapping]
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 1267a4df
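
[Editor's note: the new helpers collapse the driver's usual three-step pattern
(allocate a GEM object, map a GPU iova, vmap a kernel virtual address) into a
single call that returns the vaddr. A minimal sketch of a converted caller,
based on the signatures added below; `dev`, `gpu`, `size` and the error
handling are illustrative, not part of the commit:

	struct drm_gem_object *bo;
	uint64_t iova;
	void *vaddr;

	/* one call: allocate the BO, map its iova, return the kernel vmap */
	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
			gpu->aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
]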
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -284,28 +284,14 @@ static int a5xx_me_init(struct msm_gpu *gpu)
 static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 		const struct firmware *fw, u64 *iova)
 {
-	struct drm_device *drm = gpu->dev;
 	struct drm_gem_object *bo;
 	void *ptr;
 
-	bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
-	if (IS_ERR(bo))
-		return bo;
+	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
 
-	ptr = msm_gem_get_vaddr(bo);
-	if (!ptr) {
-		drm_gem_object_unreference(bo);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (iova) {
-		int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
-
-		if (ret) {
-			drm_gem_object_unreference(bo);
-			return ERR_PTR(ret);
-		}
-	}
+	if (IS_ERR(ptr))
+		return ERR_CAST(ptr);
 
 	memcpy(ptr, &fw->data[4], fw->size - 4);
drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-	a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
-	if (IS_ERR(a5xx_gpu->gpmu_bo))
-		goto err;
-
-	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
-			&a5xx_gpu->gpmu_iova))
-		goto err;
-
-	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
-	if (!ptr)
+	ptr = msm_gem_kernel_new_locked(drm, bosize,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+	if (IS_ERR(ptr))
 		goto err;
 
 	while (cmds_size > 0) {
drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -391,29 +391,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
-			MSM_BO_UNCACHED);
-	if (IS_ERR(adreno_gpu->memptrs_bo)) {
-		ret = PTR_ERR(adreno_gpu->memptrs_bo);
-		adreno_gpu->memptrs_bo = NULL;
-		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
-		return ret;
-	}
+	adreno_gpu->memptrs = msm_gem_kernel_new(drm,
+		sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
+		&adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
 
-	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
 	if (IS_ERR(adreno_gpu->memptrs)) {
-		dev_err(drm->dev, "could not vmap memptrs\n");
-		return -ENOMEM;
-	}
-
-	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
-			&adreno_gpu->memptrs_iova);
-	if (ret) {
-		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
-		return ret;
+		ret = PTR_ERR(adreno_gpu->memptrs);
+		adreno_gpu->memptrs = NULL;
+		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
 	}
 
-	return 0;
+	return ret;
 }
 
 void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
drivers/gpu/drm/msm/msm_drv.h
@@ -237,6 +237,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
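
[Editor's note: two variants are declared, mirroring msm_gem_new() and
msm_gem_new_locked() directly above. Assuming they follow the same convention
(the _locked form is for callers that already hold dev->struct_mutex, as in
the GPU init paths converted above), the choice looks like:

	/* caller does not hold dev->struct_mutex */
	vaddr = msm_gem_kernel_new(dev, size, flags, aspace, &bo, &iova);

	/* caller already holds dev->struct_mutex */
	vaddr = msm_gem_kernel_new_locked(dev, size, flags, aspace, &bo, &iova);
]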
drivers/gpu/drm/msm/msm_gem.c
@@ -1024,3 +1024,49 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	drm_gem_object_unreference_unlocked(obj);
 	return ERR_PTR(ret);
 }
+
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+	void *vaddr;
+	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+	int ret;
+
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	if (iova) {
+		ret = msm_gem_get_iova(obj, aspace, iova);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			return ERR_PTR(ret);
+		}
+	}
+
+	vaddr = msm_gem_get_vaddr(obj);
+	if (!vaddr) {
+		msm_gem_put_iova(obj, aspace);
+		drm_gem_object_unreference(obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (bo)
+		*bo = obj;
+
+	return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+		uint32_t flags, struct msm_gem_address_space *aspace,
+		struct drm_gem_object **bo, uint64_t *iova)
+{
+	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
+}
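
[Editor's note: the helper's error paths unwind in order (put the iova before
dropping the object reference). A caller releasing a buffer created by these
helpers would do the mirror-image cleanup; a sketch using the driver's
existing put functions (the cleanup sequence is the caller's responsibility
and not part of this commit):

	/* sketch: release in reverse order of _msm_gem_kernel_new() */
	msm_gem_put_vaddr(bo);
	msm_gem_put_iova(bo, aspace);
	drm_gem_object_unreference(bo);	/* or _unlocked outside struct_mutex */
]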
drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -33,16 +33,14 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
 	}
 
 	ring->gpu = gpu;
-	ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
-	if (IS_ERR(ring->bo)) {
-		ret = PTR_ERR(ring->bo);
-		ring->bo = NULL;
-		goto fail;
-	}
 
-	ring->start = msm_gem_get_vaddr(ring->bo);
+	/* Pass NULL for the iova pointer - we will map it later */
+	ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
+		gpu->aspace, &ring->bo, NULL);
+
 	if (IS_ERR(ring->start)) {
 		ret = PTR_ERR(ring->start);
+		ring->start = 0;
 		goto fail;
 	}
 	ring->end = ring->start + (size / 4);
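
[Editor's note: because the iova pointer is NULL here, the helper skips the
GPU mapping (the `if (iova)` branch in the msm_gem.c hunk), matching the
commit note about separating allocation and mapping for display handover. The
ring would then be mapped later with a plain msm_gem_get_iova() call; a
hypothetical sketch, with the destination variable name illustrative:

	/* sketch: map the ring into the GPU address space at hw-init time */
	ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring_iova);
	if (ret)
		return ret;
]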