Commit 2f2aa137 authored by Gerd Hoffmann

drm/virtio: move virtio_gpu_mem_entry initialization to new function

Introduce new virtio_gpu_object_shmem_init() helper function which will
create the virtio_gpu_mem_entry array, containing the backing storage
information for the host.  For the most part this just moves code from
virtio_gpu_object_attach().
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200207074638.26386-5-kraxel@redhat.com
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
parent 2fe4ca9d
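
To summarize the new split: virtio_gpu_object_create() first asks the new helper to pin the shmem pages and build the virtio_gpu_mem_entry array, then passes that array to virtio_gpu_object_attach(), which now only queues the attach-backing command. A minimal sketch of that call sequence follows (illustration only, not part of the patch; the wrapper function name is made up, and the fence and cleanup paths of the real code are omitted):

    /* Hypothetical wrapper showing the two-step flow introduced by this patch. */
    static int virtio_gpu_object_setup_backing(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_object *bo)
    {
            struct virtio_gpu_mem_entry *ents;
            unsigned int nents;
            int ret;

            /* pin shmem pages, map them (DMA or physical) and fill ents[]/nents */
            ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
            if (ret != 0)
                    return ret;

            /* hand the prepared array to the host via RESOURCE_ATTACH_BACKING */
            return virtio_gpu_object_attach(vgdev, bo, ents, nents);
    }
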
@@ -71,6 +71,7 @@ struct virtio_gpu_object {
 	struct sg_table *pages;
 	uint32_t mapped;
 	bool dumb;
 	bool created;
 };
@@ -281,7 +282,8 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t x, uint32_t y);
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence);
+			     struct virtio_gpu_mem_entry *ents,
+			     unsigned int nents);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -121,6 +121,51 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
 	return &bo->base.base;
 }
 
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+					struct virtio_gpu_object *bo,
+					struct virtio_gpu_mem_entry **ents,
+					unsigned int *nents)
+{
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+	struct scatterlist *sg;
+	int si, ret;
+
+	ret = drm_gem_shmem_pin(&bo->base.base);
+	if (ret < 0)
+		return -EINVAL;
+
+	bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+	if (!bo->pages) {
+		drm_gem_shmem_unpin(&bo->base.base);
+		return -EINVAL;
+	}
+
+	if (use_dma_api) {
+		bo->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+					bo->pages->sgl, bo->pages->nents,
+					DMA_TO_DEVICE);
+		*nents = bo->mapped;
+	} else {
+		*nents = bo->pages->nents;
+	}
+
+	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+			      GFP_KERNEL);
+	if (!(*ents)) {
+		DRM_ERROR("failed to allocate ent list\n");
+		return -ENOMEM;
+	}
+
+	for_each_sg(bo->pages->sgl, sg, *nents, si) {
+		(*ents)[si].addr = cpu_to_le64(use_dma_api
+					       ? sg_dma_address(sg)
+					       : sg_phys(sg));
+		(*ents)[si].length = cpu_to_le32(sg->length);
+		(*ents)[si].padding = 0;
+	}
+
+	return 0;
+}
+
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object_params *params,
 			     struct virtio_gpu_object **bo_ptr,
@@ -129,6 +174,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 	struct virtio_gpu_object_array *objs = NULL;
 	struct drm_gem_shmem_object *shmem_obj;
 	struct virtio_gpu_object *bo;
+	struct virtio_gpu_mem_entry *ents;
+	unsigned int nents;
 	int ret;
 
 	*bo_ptr = NULL;
@@ -165,7 +212,13 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 						  objs, fence);
 	}
 
-	ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+	if (ret != 0) {
+		virtio_gpu_free_object(&shmem_obj->base);
+		return ret;
+	}
+
+	ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
 	if (ret != 0) {
 		virtio_gpu_free_object(&shmem_obj->base);
 		return ret;
@@ -1089,56 +1089,11 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 			     struct virtio_gpu_object *obj,
-			     struct virtio_gpu_fence *fence)
+			     struct virtio_gpu_mem_entry *ents,
+			     unsigned int nents)
 {
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-	struct virtio_gpu_mem_entry *ents;
-	struct scatterlist *sg;
-	int si, nents, ret;
-
-	if (WARN_ON_ONCE(!obj->created))
-		return -EINVAL;
-	if (WARN_ON_ONCE(obj->pages))
-		return -EINVAL;
-
-	ret = drm_gem_shmem_pin(&obj->base.base);
-	if (ret < 0)
-		return -EINVAL;
-
-	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
-	if (obj->pages == NULL) {
-		drm_gem_shmem_unpin(&obj->base.base);
-		return -EINVAL;
-	}
-
-	if (use_dma_api) {
-		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
-					 obj->pages->sgl, obj->pages->nents,
-					 DMA_TO_DEVICE);
-		nents = obj->mapped;
-	} else {
-		nents = obj->pages->nents;
-	}
-
-	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
-			     GFP_KERNEL);
-	if (!ents) {
-		DRM_ERROR("failed to allocate ent list\n");
-		return -ENOMEM;
-	}
-
-	for_each_sg(obj->pages->sgl, sg, nents, si) {
-		ents[si].addr = cpu_to_le64(use_dma_api
-					    ? sg_dma_address(sg)
-					    : sg_phys(sg));
-		ents[si].length = cpu_to_le32(sg->length);
-		ents[si].padding = 0;
-	}
-
 	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
-					       ents, nents,
-					       fence);
+					       ents, nents, NULL);
 	return 0;
 }
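
For reference, the entries filled by the helper follow the virtio_gpu_mem_entry layout from the virtio-gpu uapi header (shown for context only; the definition itself is not touched by this patch), which is why the loop stores addresses and lengths in little-endian form and zeroes the padding field:

    /* include/uapi/linux/virtio_gpu.h (unchanged by this patch) */
    struct virtio_gpu_mem_entry {
            __le64 addr;
            __le32 length;
            __le32 padding;
    };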