Commit 8002db63 authored by Dave Airlie

qxl: convert qxl driver to proper use for reservations

The recent addition of lockdep support to reservations, and their subsequent
use by TTM, exposed a number of potential problems with the way qxl was using
TTM objects.

a) it allocated objects and reserved them later without validating them
underneath the reservation, which meant that in extreme conditions the objects
could be evicted before the reservation ever used them.

b) it reserved objects straight after allocating them, with no ability to
back off should a reservation fail. It now allocates all the necessary objects
first, then does a complete reservation pass over them so it can back off
cleanly, avoiding deadlocks (see the sketch after this list).

c) it kept two object-tracking lists per release, unnecessarily complicating
the reservation process.
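The fix for (b) and (c) follows TTM's execbuf-util pattern: gather every
buffer object onto a single list of struct ttm_validate_buffer entries, then
reserve the whole list atomically under one ww_acquire_ctx ticket so the set
can be backed off and retried as a unit. A minimal sketch of the pattern,
essentially the qxl_release_reserve_list() this patch adds (it relies on the
ttm_eu_* helpers from ttm/ttm_execbuf_util.h, which the patch now includes):

static int reserve_release_list(struct qxl_release *release)
{
	struct qxl_bo_list *entry;
	int ret;

	/* reserve every BO on the list under one ticket; on contention the
	 * helper backs off and retries internally, keeping lockdep happy */
	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		ret = qxl_release_validate_bo(to_qxl_bo(entry->tv.bo));
		if (ret) {
			/* drop every reservation taken above */
			ttm_eu_backoff_reservation(&release->ticket,
						   &release->bos);
			return ret;
		}
	}
	return 0;
}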

This patch removes the dual object tracking and adds reservation ticket
support to the release and fence object handling. It then ports the internal
fb drawing code and the userspace-facing ioctl to use the new interfaces
properly, and cleans up the error-path handling in several codepaths; the full
submission flow is sketched below.
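For reference, here is the whole command-submission flow after this patch in
one place, condensed from the qxl_hide_cursor() rework below (every call is an
interface this patch introduces or keeps):

static int example_hide_cursor(struct qxl_device *qdev)
{
	struct qxl_release *release;
	struct qxl_cursor_cmd *cmd;
	int ret;

	/* 1. allocate the release and its backing command BO */
	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD,
					 &release, NULL);
	if (ret)
		return ret;

	/* 2. reserve and validate every BO on the release, with backoff */
	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);
		return ret;
	}

	/* 3. write the command while the BOs are reserved */
	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_CURSOR_HIDE;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	/* 4. push to the ring, then fence and unreserve the BOs */
	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
	qxl_release_fence_buffer_objects(release);
	return 0;
}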
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 4f49ec92
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
struct qxl_release *release;
uint64_t id, next_id;
int i = 0;
int ret;
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
if (release == NULL)
break;
ret = qxl_release_reserve(qdev, release, false);
if (ret) {
qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
DRM_ERROR("failed to reserve release %lld\n", id);
}
info = qxl_release_map(qdev, release);
next_id = info->next;
qxl_release_unmap(qdev, release, info);
qxl_release_unreserve(qdev, release);
QXL_INFO(qdev, "popped %lld, next %lld\n", id,
next_id);
@@ -259,7 +253,9 @@ int qxl_garbage_collect(struct qxl_device *qdev)
return i;
}
int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
struct qxl_release *release,
unsigned long size,
struct qxl_bo **_bo)
{
struct qxl_bo *bo;
@@ -271,15 +267,15 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
}
ret = qxl_bo_reserve(bo, false);
if (unlikely(ret != 0))
ret = qxl_release_list_add(release, bo);
if (ret)
goto out_unref;
*_bo = bo;
return 0;
out_unref:
qxl_bo_unref(&bo);
return 0;
return ret;
}
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
if (ret)
return ret;
ret = qxl_release_reserve_list(release, true);
if (ret)
return ret;
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
surf->surf_create = release;
/* no need to add a release to the fence for this bo,
/* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_unreserve(qdev, release);
qxl_release_fence_buffer_objects(release);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_unreserve(qdev, release);
qxl_release_fence_buffer_objects(release);
return 0;
}
[next file]
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
kfree(qxl_crtc);
}
static void
static int
qxl_hide_cursor(struct qxl_device *qdev)
{
struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
if (ret)
return ret;
ret = qxl_release_reserve_list(release, true);
if (ret) {
qxl_release_free(qdev, release);
return ret;
}
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_unreserve(qdev, release);
qxl_release_fence_buffer_objects(release);
return 0;
}
static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
int size = 64*64*4;
int ret = 0;
if (!handle) {
qxl_hide_cursor(qdev);
return 0;
}
if (!handle)
return qxl_hide_cursor(qdev);
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
goto out_unref;
ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
qxl_bo_unreserve(user_bo);
if (ret)
goto out_unreserve;
goto out_unref;
ret = qxl_bo_kmap(user_bo, &user_ptr);
if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
&release, NULL);
if (ret)
goto out_kunmap;
ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
&cursor_bo);
if (ret)
goto out_free_release;
ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
ret = qxl_release_reserve_list(release, false);
if (ret)
goto out_free_bo;
ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
if (ret)
goto out_backoff;
cursor->header.unique = 0;
cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(cursor_bo);
/* finish with the userspace bo */
qxl_bo_kunmap(user_bo);
qxl_bo_unpin(user_bo);
qxl_bo_unreserve(user_bo);
drm_gem_object_unreference_unlocked(obj);
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
cmd->u.set.position.y = qcrtc->cur_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
qxl_release_add_res(qdev, release, cursor_bo);
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_unreserve(qdev, release);
qxl_release_fence_buffer_objects(release);
/* finish with the userspace bo */
ret = qxl_bo_reserve(user_bo, false);
if (!ret) {
qxl_bo_unpin(user_bo);
qxl_bo_unreserve(user_bo);
}
drm_gem_object_unreference_unlocked(obj);
qxl_bo_unreserve(cursor_bo);
qxl_bo_unref(&cursor_bo);
return ret;
out_backoff:
qxl_release_backoff_reserve_list(release);
out_free_bo:
qxl_bo_unref(&cursor_bo);
out_free_release:
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
out_kunmap:
qxl_bo_kunmap(user_bo);
out_unpin:
qxl_bo_unpin(user_bo);
out_unreserve:
qxl_bo_unreserve(user_bo);
out_unref:
drm_gem_object_unreference_unlocked(obj);
return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
if (ret)
return ret;
ret = qxl_release_reserve_list(release, true);
if (ret) {
qxl_release_free(qdev, release);
return ret;
}
qcrtc->cur_x = x;
qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd->u.position.y = qcrtc->cur_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_unreserve(qdev, release);
qxl_release_fence_buffer_objects(release);
return 0;
}
[next file: diff collapsed and not shown]
[next file]
@@ -42,6 +42,9 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
/* just for ttm_validate_buffer */
#include <ttm/ttm_execbuf_util.h>
#include <drm/qxl_drm.h>
#include "qxl_dev.h"
@@ -118,9 +121,9 @@ struct qxl_bo {
uint32_t surface_id;
struct qxl_fence fence; /* per bo fence - list of releases */
struct qxl_release *surf_create;
atomic_t reserve_count;
};
#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
struct qxl_gem {
struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
};
struct qxl_bo_list {
struct list_head lhead;
struct qxl_bo *bo;
};
struct qxl_reloc_list {
struct list_head bos;
struct ttm_validate_buffer tv;
};
struct qxl_crtc {
@@ -195,10 +193,20 @@ enum {
struct qxl_release {
int id;
int type;
int bo_count;
uint32_t release_offset;
uint32_t surface_release_id;
struct qxl_bo *bos[QXL_MAX_RES];
struct ww_acquire_ctx ticket;
struct list_head bos;
};
struct qxl_drm_chunk {
struct list_head head;
struct qxl_bo *bo;
};
struct qxl_drm_image {
struct qxl_bo *bo;
struct list_head chunk_list;
};
struct qxl_fb_image {
@@ -434,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
/* qxl image */
int qxl_image_create(struct qxl_device *qdev,
int qxl_image_init(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_bo **image_bo,
struct qxl_drm_image *dimage,
const uint8_t *data,
int x, int y, int width, int height,
int depth, int stride);
int
qxl_image_alloc_objects(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_drm_image **image_ptr,
int height, int stride);
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
void qxl_update_screen(struct qxl_device *qxl);
/* qxl io operations (qxl_cmd.c) */
@@ -460,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible
void qxl_io_flush_release(struct qxl_device *qdev);
void qxl_io_flush_surfaces(struct qxl_device *qdev);
int qxl_release_reserve(struct qxl_device *qdev,
struct qxl_release *release, bool no_wait);
void qxl_release_unreserve(struct qxl_device *qdev,
struct qxl_release *release);
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
struct qxl_release *release);
void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info);
/*
* qxl_bo_add_resource.
*
*/
void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
void qxl_release_backoff_reserve_list(struct qxl_release *release);
void qxl_release_fence_buffer_objects(struct qxl_release *release);
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
@@ -482,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int type, struct qxl_release **release,
struct qxl_bo **rbo);
int qxl_fence_releaseable(struct qxl_device *qdev,
struct qxl_release *release);
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
struct qxl_release *release,
unsigned long size,
struct qxl_bo **_bo);
/* qxl drawing commands */
@@ -511,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
u32 sx, u32 sy,
u32 dx, u32 dy);
uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret);
void qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release);
void qxl_release_add_res(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_bo *bo);
/* used by qxl_debugfs_release */
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id);
@@ -562,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
/* qxl_fence.c */
int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
void qxl_fence_fini(struct qxl_fence *qfence);
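qxl_fence_add_release() becomes qxl_fence_add_release_locked(): the fence_lock
acquisition moves out of the helper and into the caller, so all of a release's
buffer objects can be fenced under a single lock hold. A hedged sketch of the
new locking contract (the wrapper name here is hypothetical; the body is
condensed from qxl_release_fence_buffer_objects() at the end of this patch):

static void fence_release_bos_locked(struct qxl_release *release,
				     struct ttm_bo_device *bdev)
{
	struct ttm_validate_buffer *entry;

	spin_lock(&bdev->fence_lock);
	list_for_each_entry(entry, &release->bos, head) {
		struct qxl_bo *qbo = to_qxl_bo(entry->bo);

		/* only legal while fence_lock is held, per the _locked
		 * suffix */
		qxl_fence_add_release_locked(&qbo->fence, release->id);
	}
	spin_unlock(&bdev->fence_lock);
}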
[next file]
@@ -49,17 +49,11 @@
For some reason, every so often the qxl hw fails to release and things go wrong.
*/
int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
/* must be called with the fence lock held */
void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
{
struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
spin_lock(&bo->tbo.bdev->fence_lock);
radix_tree_insert(&qfence->tree, rel_id, qfence);
qfence->num_active_releases++;
spin_unlock(&bo->tbo.bdev->fence_lock);
return 0;
}
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
[next file]
@@ -30,30 +30,99 @@
#include "qxl_object.h"
static int
qxl_image_create_helper(struct qxl_device *qdev,
qxl_allocate_chunk(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_bo **image_bo,
struct qxl_drm_image *image,
unsigned int chunk_size)
{
struct qxl_drm_chunk *chunk;
int ret;
chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
if (!chunk)
return -ENOMEM;
ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
if (ret) {
kfree(chunk);
return ret;
}
list_add_tail(&chunk->head, &image->chunk_list);
return 0;
}
int
qxl_image_alloc_objects(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_drm_image **image_ptr,
int height, int stride)
{
struct qxl_drm_image *image;
int ret;
image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
if (!image)
return -ENOMEM;
INIT_LIST_HEAD(&image->chunk_list);
ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
if (ret) {
kfree(image);
return ret;
}
ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
if (ret) {
qxl_bo_unref(&image->bo);
kfree(image);
return ret;
}
*image_ptr = image;
return 0;
}
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
struct qxl_drm_chunk *chunk, *tmp;
list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
qxl_bo_unref(&chunk->bo);
kfree(chunk);
}
qxl_bo_unref(&dimage->bo);
kfree(dimage);
}
static int
qxl_image_init_helper(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_drm_image *dimage,
const uint8_t *data,
int width, int height,
int depth, unsigned int hash,
int stride)
{
struct qxl_drm_chunk *drv_chunk;
struct qxl_image *image;
struct qxl_data_chunk *chunk;
int i;
int chunk_stride;
int linesize = width * depth / 8;
struct qxl_bo *chunk_bo;
int ret;
struct qxl_bo *chunk_bo, *image_bo;
void *ptr;
/* Chunk */
/* FIXME: Check integer overflow */
/* TODO: variable number of chunks */
drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
chunk_bo = drv_chunk->bo;
chunk_stride = stride; /* TODO: should use linesize, but it renders
wrong (check the bitmaps are sent correctly
first) */
ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
&chunk_bo);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
chunk = ptr;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
while (remain > 0) {
page_base = out_offset & PAGE_MASK;
page_offset = offset_in_page(out_offset);
size = min((int)(PAGE_SIZE - page_offset), remain);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
}
}
}
qxl_bo_kunmap(chunk_bo);
/* Image */
ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
image_bo = dimage->bo;
ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
image = ptr;
image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
image->u.bitmap.stride = chunk_stride;
image->u.bitmap.palette = 0;
image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
qxl_release_add_res(qdev, release, chunk_bo);
qxl_bo_unreserve(chunk_bo);
qxl_bo_unref(&chunk_bo);
qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
return 0;
}
int qxl_image_create(struct qxl_device *qdev,
int qxl_image_init(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_bo **image_bo,
struct qxl_drm_image *dimage,
const uint8_t *data,
int x, int y, int width, int height,
int depth, int stride)
{
data += y * stride + x * (depth / 8);
return qxl_image_create_helper(qdev, release, image_bo, data,
return qxl_image_init_helper(qdev, release, dimage, data,
width, height, depth, 0, stride);
}
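Since the qxl_draw.c and qxl_ioctl.c diffs are collapsed on this page, here is
a hedged sketch of how a caller is expected to use the split image API; the
function name and exact call sites are illustrative assumptions, but the
ordering (allocate, reserve, init, back off and free on error) mirrors the
cursor path above:

static int example_draw_image(struct qxl_device *qdev,
			      struct qxl_release *release,
			      const uint8_t *data, int width, int height,
			      int depth, int stride)
{
	struct qxl_drm_image *dimage;
	int ret;

	/* allocate the image BO plus its data chunk BO(s); each is also
	 * added to the release's reservation list */
	ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free;

	/* everything is reserved and validated; upload the pixel data */
	ret = qxl_image_init(qdev, release, dimage, data,
			     0, 0, width, height, depth, stride);
	if (ret) {
		qxl_release_backoff_reserve_list(release);
		goto out_free;
	}
	/* the caller then pushes the draw command and calls
	 * qxl_release_fence_buffer_objects() before dropping dimage */
	return 0;

out_free:
	qxl_image_free_objects(qdev, dimage);
	return ret;
}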
[next file: diff collapsed and not shown]
[next file]
@@ -104,7 +104,7 @@ int qxl_bo_create(struct qxl_device *qdev,
bo->surface_id = 0;
qxl_fence_init(qdev, &bo->fence);
INIT_LIST_HEAD(&bo->list);
atomic_set(&bo->reserve_count, 0);
if (surf)
bo->surf = *surf;
@@ -316,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
return 0;
}
void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
{
struct qxl_bo_list *entry, *sf;
list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
qxl_bo_unreserve(entry->bo);
list_del(&entry->lhead);
kfree(entry);
}
}
int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
int ret;
list_for_each_entry(entry, &reloc_list->bos, lhead) {
if (entry->bo == bo)
return 0;
}
entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->bo = bo;
list_add(&entry->lhead, &reloc_list->bos);
ret = qxl_bo_reserve(bo, false);
if (ret)
return ret;
if (!bo->pin_count) {
qxl_ttm_placement_from_domain(bo, bo->type, false);
ret = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false);
if (ret)
return ret;
}
/* allocate a surface for reserved + validated buffers */
ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
if (ret)
return ret;
return 0;
}
int qxl_surf_evict(struct qxl_device *qdev)
{
return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
[next file]
@@ -102,6 +102,4 @@ extern int qxl_bo_unpin(struct qxl_bo *bo);
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
#endif
@@ -38,7 +38,8 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
uint64_t
static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
return 0;
}
release->type = type;
release->bo_count = 0;
release->release_offset = 0;
release->surface_release_id = 0;
INIT_LIST_HEAD(&release->bos);
idr_preload(GFP_KERNEL);
spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
int i;
QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
release->type, release->bo_count);
struct qxl_bo_list *entry, *tmp;
QXL_INFO(qdev, "release %d, type %d\n", release->id,
release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
for (i = 0 ; i < release->bo_count; ++i) {
list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
QXL_INFO(qdev, "release %llx\n",
release->bos[i]->tbo.addr_space_offset
entry->tv.bo->addr_space_offset
- DRM_FILE_OFFSET);
qxl_fence_remove_release(&release->bos[i]->fence, release->id);
qxl_bo_unref(&release->bos[i]);
qxl_fence_remove_release(&bo->fence, release->id);
qxl_bo_unref(&bo);
}
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
@@ -98,22 +99,6 @@ qxl_release_free(struct qxl_device *qdev,
kfree(release);
}
void
qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
struct qxl_bo *bo)
{
int i;
for (i = 0; i < release->bo_count; i++)
if (release->bos[i] == bo)
return;
if (release->bo_count >= QXL_MAX_RES) {
DRM_ERROR("exceeded max resource on a qxl_release item\n");
return;
}
release->bos[release->bo_count++] = qxl_bo_ref(bo);
}
static int qxl_release_bo_alloc(struct qxl_device *qdev,
struct qxl_bo **bo)
{
@@ -125,58 +110,106 @@ static int qxl_release_bo_alloc(struct qxl_device *qdev,
return ret;
}
int qxl_release_reserve(struct qxl_device *qdev,
struct qxl_release *release, bool no_wait)
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
list_for_each_entry(entry, &release->bos, tv.head) {
if (entry->tv.bo == &bo->tbo)
return 0;
}
entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
if (!entry)
return -ENOMEM;
qxl_bo_ref(bo);
entry->tv.bo = &bo->tbo;
list_add_tail(&entry->tv.head, &release->bos);
return 0;
}
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
int ret;
if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
ret = qxl_bo_reserve(release->bos[0], no_wait);
if (!bo->pin_count) {
qxl_ttm_placement_from_domain(bo, bo->type, false);
ret = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false);
if (ret)
return ret;
}
/* allocate a surface for reserved + validated buffers */
ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
if (ret)
return ret;
return 0;
}
void qxl_release_unreserve(struct qxl_device *qdev,
struct qxl_release *release)
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
if (atomic_dec_and_test(&release->bos[0]->reserve_count))
qxl_bo_unreserve(release->bos[0]);
int ret;
struct qxl_bo_list *entry;
/* if there is only one object on the release, it is the release itself;
since these objects are pinned, there is no need to reserve */
if (list_is_singular(&release->bos))
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
if (ret)
return ret;
list_for_each_entry(entry, &release->bos, tv.head) {
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
ret = qxl_release_validate_bo(bo);
if (ret) {
ttm_eu_backoff_reservation(&release->ticket, &release->bos);
return ret;
}
}
return 0;
}
void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
/* if there is only one object on the release, it is the release itself;
since these objects are pinned, there is no need to reserve */
if (list_is_singular(&release->bos))
return;
ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
struct qxl_release *create_rel,
struct qxl_release **release)
{
int ret;
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
int idr_ret;
struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
struct qxl_bo *bo;
union qxl_release_info *info;
/* stash the release after the create command */
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
bo = qxl_bo_ref(create_rel->bos[0]);
bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
(*release)->release_offset = create_rel->release_offset + 64;
qxl_release_add_res(qdev, *release, bo);
qxl_release_list_add(*release, bo);
ret = qxl_release_reserve(qdev, *release, false);
if (ret) {
DRM_ERROR("release reserve failed\n");
goto out_unref;
}
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
out_unref:
qxl_bo_unref(&bo);
return ret;
return 0;
}
return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -189,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
{
struct qxl_bo *bo;
int idr_ret;
int ret;
int ret = 0;
union qxl_release_info *info;
int cur_idx;
@@ -228,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
if (rbo)
*rbo = bo;
qxl_release_add_res(qdev, *release, bo);
ret = qxl_release_reserve(qdev, *release, false);
mutex_unlock(&qdev->release_mutex);
if (ret)
goto out_unref;
qxl_release_list_add(*release, bo);
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
out_unref:
qxl_bo_unref(&bo);
return ret;
}
int qxl_fence_releaseable(struct qxl_device *qdev,
struct qxl_release *release)
{
int i, ret;
for (i = 0; i < release->bo_count; i++) {
if (!release->bos[i]->tbo.sync_obj)
release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
if (ret)
return ret;
}
return 0;
}
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id)
{
@@ -270,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
DRM_ERROR("failed to find id in release_idr\n");
return NULL;
}
if (release->bo_count < 1) {
DRM_ERROR("read a released resource with 0 bos\n");
return NULL;
}
return release;
}
@@ -282,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
{
void *ptr;
union qxl_release_info *info;
struct qxl_bo *bo = release->bos[0];
struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
if (!ptr)
return NULL;
info = ptr + (release->release_offset & ~PAGE_SIZE);
return info;
}
@@ -293,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info)
{
struct qxl_bo *bo = release->bos[0];
struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
void *ptr;
ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_bo_driver *driver;
struct qxl_bo *qbo;
/* if there is only one object on the release, it is the release itself;
since these objects are pinned, there is no need to reserve */
if (list_is_singular(&release->bos))
return;
bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
driver = bdev->driver;
glob = bo->glob;
spin_lock(&glob->lru_lock);
spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
qbo = to_qxl_bo(bo);
if (!entry->bo->sync_obj)
entry->bo->sync_obj = &qbo->fence;
qxl_fence_add_release_locked(&qbo->fence, release->id);
ttm_bo_add_to_lru(bo);
ww_mutex_unlock(&bo->resv->lock);
entry->reserved = false;
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
ww_acquire_fini(&release->ticket);
}