Commit f651c8b0 authored by Gurchetan Singh, committed by Gerd Hoffmann

drm/virtio: factor out the sg_table from virtio_gpu_object

A resource will be a shmem based resource or a (planned)
vram based resource, so it makes sense to factor out common fields
(resource handle, dumb).

v2: move mapped field to shmem object
Signed-off-by: Gurchetan Singh <gurchetansingh@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/20200305013212.130640-1-gurchetansingh@chromium.org
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent ee21ec77
...@@ -69,16 +69,21 @@ struct virtio_gpu_object_params { ...@@ -69,16 +69,21 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object { struct virtio_gpu_object {
struct drm_gem_shmem_object base; struct drm_gem_shmem_object base;
uint32_t hw_res_handle; uint32_t hw_res_handle;
struct sg_table *pages;
uint32_t mapped;
bool dumb; bool dumb;
bool created; bool created;
}; };
#define gem_to_virtio_gpu_obj(gobj) \ #define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base) container_of((gobj), struct virtio_gpu_object, base.base)
struct virtio_gpu_object_shmem {
struct virtio_gpu_object base;
struct sg_table *pages;
uint32_t mapped;
};
#define to_virtio_gpu_shmem(virtio_gpu_object) \
container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
struct virtio_gpu_object_array { struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket; struct ww_acquire_ctx ticket;
struct list_head next; struct list_head next;
......
...@@ -65,16 +65,17 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t ...@@ -65,16 +65,17 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{ {
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (bo->pages) { if (shmem->pages) {
if (bo->mapped) { if (shmem->mapped) {
dma_unmap_sg(vgdev->vdev->dev.parent, dma_unmap_sg(vgdev->vdev->dev.parent,
bo->pages->sgl, bo->mapped, shmem->pages->sgl, shmem->mapped,
DMA_TO_DEVICE); DMA_TO_DEVICE);
bo->mapped = 0; shmem->mapped = 0;
} }
sg_free_table(bo->pages); sg_free_table(shmem->pages);
bo->pages = NULL; shmem->pages = NULL;
drm_gem_shmem_unpin(&bo->base.base); drm_gem_shmem_unpin(&bo->base.base);
} }
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
...@@ -133,6 +134,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, ...@@ -133,6 +134,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
unsigned int *nents) unsigned int *nents)
{ {
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
struct scatterlist *sg; struct scatterlist *sg;
int si, ret; int si, ret;
...@@ -140,19 +142,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, ...@@ -140,19 +142,20 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
if (ret < 0) if (ret < 0)
return -EINVAL; return -EINVAL;
bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base); shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
if (!bo->pages) { if (!shmem->pages) {
drm_gem_shmem_unpin(&bo->base.base); drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL; return -EINVAL;
} }
if (use_dma_api) { if (use_dma_api) {
bo->mapped = dma_map_sg(vgdev->vdev->dev.parent, shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
bo->pages->sgl, bo->pages->nents, shmem->pages->sgl,
shmem->pages->nents,
DMA_TO_DEVICE); DMA_TO_DEVICE);
*nents = bo->mapped; *nents = shmem->mapped;
} else { } else {
*nents = bo->pages->nents; *nents = shmem->pages->nents;
} }
*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry), *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
...@@ -162,7 +165,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, ...@@ -162,7 +165,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
return -ENOMEM; return -ENOMEM;
} }
for_each_sg(bo->pages->sgl, sg, *nents, si) { for_each_sg(shmem->pages->sgl, sg, *nents, si) {
(*ents)[si].addr = cpu_to_le64(use_dma_api (*ents)[si].addr = cpu_to_le64(use_dma_api
? sg_dma_address(sg) ? sg_dma_address(sg)
: sg_phys(sg)); : sg_phys(sg));
......
...@@ -600,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, ...@@ -600,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p; struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf; struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api) if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent, dma_sync_sg_for_device(vgdev->vdev->dev.parent,
bo->pages->sgl, bo->pages->nents, shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE); DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
...@@ -1015,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, ...@@ -1015,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf; struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api) if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent, dma_sync_sg_for_device(vgdev->vdev->dev.parent,
bo->pages->sgl, bo->pages->nents, shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE); DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment