Commit a20c4173 authored by Ezequiel Garcia, committed by Gerd Hoffmann

virtio: Rework virtio_gpu_object_kmap()

Currently, virtio_gpu_object_kmap() is only called by
virtio_gpufb_create(), when a DRM framebuffer is created.

Thus, instead of returning the vmap'ed address, emit a warning
if virtio_gpu_object_kmap is called on an already mapped
object. With this change, kmap/kunmap calls are now balanced.
Signed-off-by: Ezequiel Garcia <ezequiel@collabora.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20180925161606.17980-3-ezequiel@collabora.com
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
parent 02c87cab
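For illustration only (not part of this commit's diff): with the reworked API a caller maps the object once, reads the mapping address directly from bo->vmap instead of through an out-parameter, and later issues the matching kunmap. A minimal caller sketch follows; the helper name and the PAGE_SIZE access are hypothetical and only stand in for real framebuffer code.

/* Hypothetical caller sketch, showing the reworked kmap/kunmap usage. */
static int example_touch_object(struct virtio_gpu_object *bo)
{
	int ret;

	/*
	 * Old API: ret = virtio_gpu_object_kmap(bo, &ptr); returned the
	 * mapping via an out-parameter and silently accepted repeat calls.
	 * New API: map exactly once, then read the address from bo->vmap.
	 */
	ret = virtio_gpu_object_kmap(bo);
	if (ret)
		return ret;

	memset(bo->vmap, 0, PAGE_SIZE);	/* access the vmap'ed address */

	/* Pair every kmap with a kunmap so the calls stay balanced. */
	virtio_gpu_object_kunmap(bo);
	return 0;
}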
@@ -364,7 +364,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     unsigned long size, bool kernel, bool pinned,
 			     struct virtio_gpu_object **bo_ptr);
 void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr);
+int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
 int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
 				   struct virtio_gpu_object *bo);
 void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
@@ -203,12 +203,6 @@ static struct fb_ops virtio_gpufb_ops = {
 	.fb_imageblit = virtio_gpu_3d_imageblit,
 };
 
-static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj)
-{
-	return virtio_gpu_object_kmap(obj, NULL);
-}
-
 static int virtio_gpufb_create(struct drm_fb_helper *helper,
 			       struct drm_fb_helper_surface_size *sizes)
 {
@@ -241,9 +235,9 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	virtio_gpu_cmd_create_resource(vgdev, resid, format,
 				       mode_cmd.width, mode_cmd.height);
 
-	ret = virtio_gpu_vmap_fb(vgdev, obj);
+	ret = virtio_gpu_object_kmap(obj);
 	if (ret) {
-		DRM_ERROR("failed to vmap fb %d\n", ret);
+		DRM_ERROR("failed to kmap fb %d\n", ret);
 		goto err_obj_vmap;
 	}
@@ -107,22 +107,17 @@ void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
 	ttm_bo_kunmap(&bo->kmap);
 }
 
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
+int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
 {
 	bool is_iomem;
 	int r;
 
-	if (bo->vmap) {
-		if (ptr)
-			*ptr = bo->vmap;
-		return 0;
-	}
+	WARN_ON(bo->vmap);
+
 	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
 	if (r)
 		return r;
 	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
-	if (ptr)
-		*ptr = bo->vmap;
 	return 0;
 }