Commit 10d123b2 authored by Dave Airlie

Merge branch 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux into drm-next

Adds support for concurrent buffer reads.

* 'drm-next-3.18' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: allow concurrent buffer reads
  drm/radeon: add the infrastructure for concurrent buffer access
  drm/ttm: allow fence to be added as shared
parents e351943b 298593b6
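The core of the series is a change to the per-ASIC copy callbacks: instead of returning an int and filling in a struct radeon_fence ** out parameter, they now return the fence for the copy directly (or an ERR_PTR-encoded error) and take the buffer's struct reservation_object to synchronize against. A minimal, hedged sketch of the new calling convention follows; the wrapper function and its name are illustrative only, the real callers are the TTM move path, the self-tests and the benchmark code shown in the hunks below.

/* Hedged sketch of the new copy-callback convention, not taken verbatim
 * from the patch: radeon_copy() now hands back the fence of the copy
 * (or an ERR_PTR) instead of filling in an out parameter. */
static int copy_and_wait(struct radeon_device *rdev,
                         uint64_t src, uint64_t dst,
                         unsigned num_gpu_pages,
                         struct reservation_object *resv)
{
        struct radeon_fence *fence;
        int r;

        fence = radeon_copy(rdev, src, dst, num_gpu_pages, resv);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        /* block until the copy finished, then drop the fence reference */
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
}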
@@ -226,6 +226,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 	qxl_bo_ref(bo);
 	entry->tv.bo = &bo->tbo;
+	entry->tv.shared = false;
 	list_add_tail(&entry->tv.head, &release->bos);
 	return 0;
 }
...
@@ -3959,18 +3959,19 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
  *
  * Copy GPU paging using the CP DMA engine (CIK+).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int cik_copy_cpdma(struct radeon_device *rdev,
-                   uint64_t src_offset, uint64_t dst_offset,
-                   unsigned num_gpu_pages,
-                   struct radeon_fence **fence)
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+                                    uint64_t src_offset, uint64_t dst_offset,
+                                    unsigned num_gpu_pages,
+                                    struct reservation_object *resv)
 {
         struct radeon_semaphore *sem = NULL;
+        struct radeon_fence *fence;
         int ring_index = rdev->asic->copy.blit_ring_index;
         struct radeon_ring *ring = &rdev->ring[ring_index];
         u32 size_in_bytes, cur_size_in_bytes, control;
@@ -3980,7 +3981,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
         r = radeon_semaphore_create(rdev, &sem);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }
         size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -3989,10 +3990,10 @@ int cik_copy_cpdma(struct radeon_device *rdev,
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
-        radeon_semaphore_sync_to(sem, *fence);
+        radeon_semaphore_sync_resv(sem, resv, false);
         radeon_semaphore_sync_rings(rdev, sem, ring->idx);
         for (i = 0; i < num_loops; i++) {
@@ -4014,17 +4015,17 @@ int cik_copy_cpdma(struct radeon_device *rdev,
                 dst_offset += cur_size_in_bytes;
         }
-        r = radeon_fence_emit(rdev, fence, ring->idx);
+        r = radeon_fence_emit(rdev, &fence, ring->idx);
         if (r) {
                 radeon_ring_unlock_undo(rdev, ring);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        radeon_semaphore_free(rdev, &sem, *fence);
-        return r;
+        radeon_semaphore_free(rdev, &sem, fence);
+        return fence;
 }
 /*
...
@@ -537,18 +537,19 @@ void cik_sdma_fini(struct radeon_device *rdev)
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
  *
  * Copy GPU paging using the DMA engine (CIK).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int cik_copy_dma(struct radeon_device *rdev,
-                 uint64_t src_offset, uint64_t dst_offset,
-                 unsigned num_gpu_pages,
-                 struct radeon_fence **fence)
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+                                  uint64_t src_offset, uint64_t dst_offset,
+                                  unsigned num_gpu_pages,
+                                  struct reservation_object *resv)
 {
         struct radeon_semaphore *sem = NULL;
+        struct radeon_fence *fence;
         int ring_index = rdev->asic->copy.dma_ring_index;
         struct radeon_ring *ring = &rdev->ring[ring_index];
         u32 size_in_bytes, cur_size_in_bytes;
@@ -558,7 +559,7 @@ int cik_copy_dma(struct radeon_device *rdev,
         r = radeon_semaphore_create(rdev, &sem);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }
         size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -567,10 +568,10 @@ int cik_copy_dma(struct radeon_device *rdev,
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
-        radeon_semaphore_sync_to(sem, *fence);
+        radeon_semaphore_sync_resv(sem, resv, false);
         radeon_semaphore_sync_rings(rdev, sem, ring->idx);
         for (i = 0; i < num_loops; i++) {
@@ -589,17 +590,17 @@ int cik_copy_dma(struct radeon_device *rdev,
                 dst_offset += cur_size_in_bytes;
         }
-        r = radeon_fence_emit(rdev, fence, ring->idx);
+        r = radeon_fence_emit(rdev, &fence, ring->idx);
         if (r) {
                 radeon_ring_unlock_undo(rdev, ring);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        radeon_semaphore_free(rdev, &sem, *fence);
-        return r;
+        radeon_semaphore_free(rdev, &sem, fence);
+        return fence;
 }
 /**
...
@@ -104,12 +104,14 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int evergreen_copy_dma(struct radeon_device *rdev,
-                       uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_gpu_pages,
-                       struct radeon_fence **fence)
+struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
+                                        uint64_t src_offset,
+                                        uint64_t dst_offset,
+                                        unsigned num_gpu_pages,
+                                        struct reservation_object *resv)
 {
         struct radeon_semaphore *sem = NULL;
+        struct radeon_fence *fence;
         int ring_index = rdev->asic->copy.dma_ring_index;
         struct radeon_ring *ring = &rdev->ring[ring_index];
         u32 size_in_dw, cur_size_in_dw;
@@ -119,7 +121,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
         r = radeon_semaphore_create(rdev, &sem);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }
         size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -128,10 +130,10 @@ int evergreen_copy_dma(struct radeon_device *rdev,
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
-        radeon_semaphore_sync_to(sem, *fence);
+        radeon_semaphore_sync_resv(sem, resv, false);
         radeon_semaphore_sync_rings(rdev, sem, ring->idx);
         for (i = 0; i < num_loops; i++) {
@@ -148,17 +150,17 @@ int evergreen_copy_dma(struct radeon_device *rdev,
                 dst_offset += cur_size_in_dw * 4;
         }
-        r = radeon_fence_emit(rdev, fence, ring->idx);
+        r = radeon_fence_emit(rdev, &fence, ring->idx);
         if (r) {
                 radeon_ring_unlock_undo(rdev, ring);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        radeon_semaphore_free(rdev, &sem, *fence);
-        return r;
+        radeon_semaphore_free(rdev, &sem, fence);
+        return fence;
 }
 /**
...
@@ -855,13 +855,14 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev,
         return false;
 }
-int r100_copy_blit(struct radeon_device *rdev,
-                   uint64_t src_offset,
-                   uint64_t dst_offset,
-                   unsigned num_gpu_pages,
-                   struct radeon_fence **fence)
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+                                    uint64_t src_offset,
+                                    uint64_t dst_offset,
+                                    unsigned num_gpu_pages,
+                                    struct reservation_object *resv)
 {
         struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+        struct radeon_fence *fence;
         uint32_t cur_pages;
         uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
         uint32_t pitch;
@@ -882,7 +883,7 @@ int r100_copy_blit(struct radeon_device *rdev,
         r = radeon_ring_lock(rdev, ring, ndw);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
-                return -EINVAL;
+                return ERR_PTR(-EINVAL);
         }
         while (num_gpu_pages > 0) {
                 cur_pages = num_gpu_pages;
@@ -922,11 +923,13 @@ int r100_copy_blit(struct radeon_device *rdev,
                           RADEON_WAIT_2D_IDLECLEAN |
                           RADEON_WAIT_HOST_IDLECLEAN |
                           RADEON_WAIT_DMA_GUI_IDLE);
-        if (fence) {
-                r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+        r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+        if (r) {
+                radeon_ring_unlock_undo(rdev, ring);
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        return r;
+        return fence;
 }
 static int r100_cp_wait_for_idle(struct radeon_device *rdev)
...
@@ -80,13 +80,14 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
         return vtx_size;
 }
-int r200_copy_dma(struct radeon_device *rdev,
-                  uint64_t src_offset,
-                  uint64_t dst_offset,
-                  unsigned num_gpu_pages,
-                  struct radeon_fence **fence)
+struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
+                                   uint64_t src_offset,
+                                   uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv)
 {
         struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+        struct radeon_fence *fence;
         uint32_t size;
         uint32_t cur_size;
         int i, num_loops;
@@ -98,7 +99,7 @@ int r200_copy_dma(struct radeon_device *rdev,
         r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }
         /* Must wait for 2D idle & clean before DMA or hangs might happen */
         radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
@@ -118,11 +119,13 @@ int r200_copy_dma(struct radeon_device *rdev,
         }
         radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
         radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
-        if (fence) {
-                r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+        r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+        if (r) {
+                radeon_ring_unlock_undo(rdev, ring);
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        return r;
+        return fence;
 }
...
@@ -2894,12 +2894,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int r600_copy_cpdma(struct radeon_device *rdev,
-                    uint64_t src_offset, uint64_t dst_offset,
-                    unsigned num_gpu_pages,
-                    struct radeon_fence **fence)
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+                                     uint64_t src_offset, uint64_t dst_offset,
+                                     unsigned num_gpu_pages,
+                                     struct reservation_object *resv)
 {
         struct radeon_semaphore *sem = NULL;
+        struct radeon_fence *fence;
         int ring_index = rdev->asic->copy.blit_ring_index;
         struct radeon_ring *ring = &rdev->ring[ring_index];
         u32 size_in_bytes, cur_size_in_bytes, tmp;
@@ -2909,7 +2910,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
         r = radeon_semaphore_create(rdev, &sem);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }
         size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -2918,10 +2919,10 @@ int r600_copy_cpdma(struct radeon_device *rdev,
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
-        radeon_semaphore_sync_to(sem, *fence);
+        radeon_semaphore_sync_resv(sem, resv, false);
         radeon_semaphore_sync_rings(rdev, sem, ring->idx);
         radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
@@ -2948,17 +2949,17 @@ int r600_copy_cpdma(struct radeon_device *rdev,
         radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
         radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
-        r = radeon_fence_emit(rdev, fence, ring->idx);
+        r = radeon_fence_emit(rdev, &fence, ring->idx);
         if (r) {
                 radeon_ring_unlock_undo(rdev, ring);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        radeon_semaphore_free(rdev, &sem, *fence);
-        return r;
+        radeon_semaphore_free(rdev, &sem, fence);
+        return fence;
 }
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
...
@@ -436,18 +436,19 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
  *
  * Copy GPU paging using the DMA engine (r6xx).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int r600_copy_dma(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_gpu_pages,
-                  struct radeon_fence **fence)
+struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+                                   uint64_t src_offset, uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv)
 {
         struct radeon_semaphore *sem = NULL;
+        struct radeon_fence *fence;
         int ring_index = rdev->asic->copy.dma_ring_index;
         struct radeon_ring *ring = &rdev->ring[ring_index];
         u32 size_in_dw, cur_size_in_dw;
@@ -457,7 +458,7 @@ int r600_copy_dma(struct radeon_device *rdev,
         r = radeon_semaphore_create(rdev, &sem);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }
         size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -466,10 +467,10 @@ int r600_copy_dma(struct radeon_device *rdev,
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
-        radeon_semaphore_sync_to(sem, *fence);
+        radeon_semaphore_sync_resv(sem, resv, false);
         radeon_semaphore_sync_rings(rdev, sem, ring->idx);
         for (i = 0; i < num_loops; i++) {
@@ -486,15 +487,15 @@ int r600_copy_dma(struct radeon_device *rdev,
                 dst_offset += cur_size_in_dw * 4;
         }
-        r = radeon_fence_emit(rdev, fence, ring->idx);
+        r = radeon_fence_emit(rdev, &fence, ring->idx);
         if (r) {
                 radeon_ring_unlock_undo(rdev, ring);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        radeon_semaphore_free(rdev, &sem, *fence);
-        return r;
+        radeon_semaphore_free(rdev, &sem, fence);
+        return fence;
 }
@@ -585,8 +585,11 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
                                   struct radeon_semaphore *semaphore);
 bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
                                 struct radeon_semaphore *semaphore);
-void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
-                              struct radeon_fence *fence);
+void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
+                                 struct radeon_fence *fence);
+void radeon_semaphore_sync_resv(struct radeon_semaphore *semaphore,
+                                struct reservation_object *resv,
+                                bool shared);
 int radeon_semaphore_sync_rings(struct radeon_device *rdev,
                                 struct radeon_semaphore *semaphore,
                                 int waiting_ring);
@@ -1855,24 +1858,24 @@ struct radeon_asic {
         } display;
         /* copy functions for bo handling */
         struct {
-                int (*blit)(struct radeon_device *rdev,
-                            uint64_t src_offset,
-                            uint64_t dst_offset,
-                            unsigned num_gpu_pages,
-                            struct radeon_fence **fence);
+                struct radeon_fence *(*blit)(struct radeon_device *rdev,
+                                             uint64_t src_offset,
+                                             uint64_t dst_offset,
+                                             unsigned num_gpu_pages,
+                                             struct reservation_object *resv);
                 u32 blit_ring_index;
-                int (*dma)(struct radeon_device *rdev,
-                           uint64_t src_offset,
-                           uint64_t dst_offset,
-                           unsigned num_gpu_pages,
-                           struct radeon_fence **fence);
+                struct radeon_fence *(*dma)(struct radeon_device *rdev,
+                                            uint64_t src_offset,
+                                            uint64_t dst_offset,
+                                            unsigned num_gpu_pages,
+                                            struct reservation_object *resv);
                 u32 dma_ring_index;
                 /* method used for bo copy */
-                int (*copy)(struct radeon_device *rdev,
-                            uint64_t src_offset,
-                            uint64_t dst_offset,
-                            unsigned num_gpu_pages,
-                            struct radeon_fence **fence);
+                struct radeon_fence *(*copy)(struct radeon_device *rdev,
+                                             uint64_t src_offset,
+                                             uint64_t dst_offset,
+                                             unsigned num_gpu_pages,
+                                             struct reservation_object *resv);
                 /* ring used for bo copies */
                 u32 copy_ring_index;
         } copy;
@@ -2833,9 +2836,9 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
 #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
 #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
-#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
-#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
-#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
+#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
+#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
+#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
 #define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
 #define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
 #define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
...
@@ -81,11 +81,11 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 int r100_cs_parse(struct radeon_cs_parser *p);
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
-int r100_copy_blit(struct radeon_device *rdev,
-                   uint64_t src_offset,
-                   uint64_t dst_offset,
-                   unsigned num_gpu_pages,
-                   struct radeon_fence **fence);
+struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
+                                    uint64_t src_offset,
+                                    uint64_t dst_offset,
+                                    unsigned num_gpu_pages,
+                                    struct reservation_object *resv);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                          uint32_t tiling_flags, uint32_t pitch,
                          uint32_t offset, uint32_t obj_size);
@@ -153,11 +153,11 @@ void r100_ring_hdp_flush(struct radeon_device *rdev,
 /*
  * r200,rv250,rs300,rv280
  */
-extern int r200_copy_dma(struct radeon_device *rdev,
-                         uint64_t src_offset,
-                         uint64_t dst_offset,
-                         unsigned num_gpu_pages,
-                         struct radeon_fence **fence);
+struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
+                                   uint64_t src_offset,
+                                   uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv);
 void r200_set_safe_registers(struct radeon_device *rdev);
 /*
@@ -341,12 +341,14 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_copy_cpdma(struct radeon_device *rdev,
-                    uint64_t src_offset, uint64_t dst_offset,
-                    unsigned num_gpu_pages, struct radeon_fence **fence);
-int r600_copy_dma(struct radeon_device *rdev,
-                  uint64_t src_offset, uint64_t dst_offset,
-                  unsigned num_gpu_pages, struct radeon_fence **fence);
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+                                     uint64_t src_offset, uint64_t dst_offset,
+                                     unsigned num_gpu_pages,
+                                     struct reservation_object *resv);
+struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
+                                   uint64_t src_offset, uint64_t dst_offset,
+                                   unsigned num_gpu_pages,
+                                   struct reservation_object *resv);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -462,10 +464,10 @@ bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 void r700_cp_stop(struct radeon_device *rdev);
 void r700_cp_fini(struct radeon_device *rdev);
-int rv770_copy_dma(struct radeon_device *rdev,
-                   uint64_t src_offset, uint64_t dst_offset,
-                   unsigned num_gpu_pages,
-                   struct radeon_fence **fence);
+struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
+                                    uint64_t src_offset, uint64_t dst_offset,
+                                    unsigned num_gpu_pages,
+                                    struct reservation_object *resv);
 u32 rv770_get_xclk(struct radeon_device *rdev);
 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int rv770_get_temp(struct radeon_device *rdev);
@@ -536,10 +538,10 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
                                    struct radeon_fence *fence);
 void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
                                    struct radeon_ib *ib);
-int evergreen_copy_dma(struct radeon_device *rdev,
-                       uint64_t src_offset, uint64_t dst_offset,
-                       unsigned num_gpu_pages,
-                       struct radeon_fence **fence);
+struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
+                                        uint64_t src_offset, uint64_t dst_offset,
+                                        unsigned num_gpu_pages,
+                                        struct reservation_object *resv);
 void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
 void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 int evergreen_get_temp(struct radeon_device *rdev);
@@ -701,10 +703,10 @@ int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-int si_copy_dma(struct radeon_device *rdev,
-                uint64_t src_offset, uint64_t dst_offset,
-                unsigned num_gpu_pages,
-                struct radeon_fence **fence);
+struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
+                                 uint64_t src_offset, uint64_t dst_offset,
+                                 unsigned num_gpu_pages,
+                                 struct reservation_object *resv);
 void si_dma_vm_copy_pages(struct radeon_device *rdev,
                           struct radeon_ib *ib,
@@ -760,14 +762,14 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
                                   struct radeon_semaphore *semaphore,
                                   bool emit_wait);
 void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int cik_copy_dma(struct radeon_device *rdev,
-                 uint64_t src_offset, uint64_t dst_offset,
-                 unsigned num_gpu_pages,
-                 struct radeon_fence **fence);
-int cik_copy_cpdma(struct radeon_device *rdev,
-                   uint64_t src_offset, uint64_t dst_offset,
-                   unsigned num_gpu_pages,
-                   struct radeon_fence **fence);
+struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
+                                  uint64_t src_offset, uint64_t dst_offset,
+                                  unsigned num_gpu_pages,
+                                  struct reservation_object *resv);
+struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
+                                    uint64_t src_offset, uint64_t dst_offset,
+                                    unsigned num_gpu_pages,
+                                    struct reservation_object *resv);
 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
...
@@ -45,33 +45,29 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
         for (i = 0; i < n; i++) {
                 switch (flag) {
                 case RADEON_BENCHMARK_COPY_DMA:
-                        r = radeon_copy_dma(rdev, saddr, daddr,
-                                            size / RADEON_GPU_PAGE_SIZE,
-                                            &fence);
+                        fence = radeon_copy_dma(rdev, saddr, daddr,
+                                                size / RADEON_GPU_PAGE_SIZE,
+                                                NULL);
                         break;
                 case RADEON_BENCHMARK_COPY_BLIT:
-                        r = radeon_copy_blit(rdev, saddr, daddr,
-                                             size / RADEON_GPU_PAGE_SIZE,
-                                             &fence);
+                        fence = radeon_copy_blit(rdev, saddr, daddr,
+                                                 size / RADEON_GPU_PAGE_SIZE,
+                                                 NULL);
                         break;
                 default:
                         DRM_ERROR("Unknown copy method\n");
-                        r = -EINVAL;
+                        return -EINVAL;
                 }
-                if (r)
-                        goto exit_do_move;
+                if (IS_ERR(fence))
+                        return PTR_ERR(fence);
                 r = radeon_fence_wait(fence, false);
-                if (r)
-                        goto exit_do_move;
                 radeon_fence_unref(&fence);
+                if (r)
+                        return r;
         }
         end_jiffies = jiffies;
-        r = jiffies_to_msecs(end_jiffies - start_jiffies);
-exit_do_move:
-        if (fence)
-                radeon_fence_unref(&fence);
-        return r;
+        return jiffies_to_msecs(end_jiffies - start_jiffies);
 }
...
@@ -183,6 +183,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                 }
                 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+                p->relocs[i].tv.shared = !r->write_domain;
                 p->relocs[i].handle = r->handle;
                 radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
@@ -254,16 +255,13 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
         for (i = 0; i < p->nrelocs; i++) {
                 struct reservation_object *resv;
-                struct fence *fence;
                 if (!p->relocs[i].robj)
                         continue;
                 resv = p->relocs[i].robj->tbo.resv;
-                fence = reservation_object_get_excl(resv);
-                radeon_semaphore_sync_to(p->ib.semaphore,
-                                         (struct radeon_fence *)fence);
+                radeon_semaphore_sync_resv(p->ib.semaphore, resv,
+                                           p->relocs[i].tv.shared);
         }
 }
@@ -568,7 +566,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                 goto out;
         }
         radeon_cs_sync_rings(parser);
-        radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+        radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
         if ((rdev->family >= CHIP_TAHITI) &&
             (parser->chunk_const_ib_idx != -1)) {
...
@@ -145,7 +145,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
         if (ib->vm) {
                 struct radeon_fence *vm_id_fence;
                 vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
-                radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+                radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence);
         }
         /* sync with other rings */
...
@@ -96,15 +96,15 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
 }
 /**
- * radeon_semaphore_sync_to - use the semaphore to sync to a fence
+ * radeon_semaphore_sync_fence - use the semaphore to sync to a fence
  *
  * @semaphore: semaphore object to add fence to
  * @fence: fence to sync to
  *
  * Sync to the fence using this semaphore object
  */
-void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
-                              struct radeon_fence *fence)
+void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
+                                 struct radeon_fence *fence)
 {
         struct radeon_fence *other;
@@ -115,6 +115,38 @@ void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
         semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
 }
+/**
+ * radeon_semaphore_sync_resv - use the semaphore to sync to a reservation object
+ *
+ * @sema: semaphore object to add fences from the reservation object to
+ * @resv: reservation object with embedded fence
+ * @shared: true if we should only sync to the exclusive fence
+ *
+ * Sync to the fences of the reservation object using this semaphore object
+ */
+void radeon_semaphore_sync_resv(struct radeon_semaphore *sema,
+                                struct reservation_object *resv,
+                                bool shared)
+{
+        struct reservation_object_list *flist;
+        struct fence *f;
+        unsigned i;
+
+        /* always sync to the exclusive fence */
+        f = reservation_object_get_excl(resv);
+        radeon_semaphore_sync_fence(sema, (struct radeon_fence *)f);
+
+        flist = reservation_object_get_list(resv);
+        if (shared || !flist)
+                return;
+
+        for (i = 0; i < flist->shared_count; ++i) {
+                f = rcu_dereference_protected(flist->shared[i],
+                                              reservation_object_held(resv));
+                radeon_semaphore_sync_fence(sema, (struct radeon_fence *)f);
+        }
+}
 /**
  * radeon_semaphore_sync_rings - sync ring to all registered fences
  *
...
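All of the ASIC-specific copy functions in this series follow the same synchronization pattern built on the helper above. A compressed, hedged sketch of that pattern (ring locking, the actual copy packets and the error handling are omitted, and the variables are those of the surrounding copy functions):

/* Hedged sketch, condensed from the copy functions in this series;
 * not a complete or standalone function. */
struct radeon_semaphore *sem = NULL;
struct radeon_fence *fence;

radeon_semaphore_create(rdev, &sem);
/* wait for the exclusive fence and, because the copy writes its
 * destination (shared == false), for all shared fences as well */
radeon_semaphore_sync_resv(sem, resv, false);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
/* ... emit the copy packets and the fence, commit the ring ... */
radeon_semaphore_free(rdev, &sem, fence);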
@@ -116,11 +116,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
         radeon_bo_kunmap(gtt_obj[i]);
         if (ring == R600_RING_TYPE_DMA_INDEX)
-                r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+                fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
+                                        size / RADEON_GPU_PAGE_SIZE,
+                                        NULL);
         else
-                r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
-        if (r) {
+                fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
+                                         size / RADEON_GPU_PAGE_SIZE,
+                                         NULL);
+        if (IS_ERR(fence)) {
                 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
+                r = PTR_ERR(fence);
                 goto out_lclean_unpin;
         }
@@ -162,11 +167,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
         radeon_bo_kunmap(vram_obj);
         if (ring == R600_RING_TYPE_DMA_INDEX)
-                r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+                fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
+                                        size / RADEON_GPU_PAGE_SIZE,
+                                        NULL);
         else
-                r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
-        if (r) {
+                fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
+                                         size / RADEON_GPU_PAGE_SIZE,
+                                         NULL);
+        if (IS_ERR(fence)) {
                 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
+                r = PTR_ERR(fence);
                 goto out_lclean_unpin;
         }
@@ -222,7 +232,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                 radeon_bo_unreserve(gtt_obj[i]);
                 radeon_bo_unref(&gtt_obj[i]);
         }
-        if (fence)
+        if (fence && !IS_ERR(fence))
                 radeon_fence_unref(&fence);
         break;
 }
...
@@ -233,6 +233,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
         struct radeon_device *rdev;
         uint64_t old_start, new_start;
         struct radeon_fence *fence;
+        unsigned num_pages;
         int r, ridx;
         rdev = radeon_get_rdev(bo->bdev);
@@ -269,12 +270,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
         BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
-        /* sync other rings */
-        fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv);
-        r = radeon_copy(rdev, old_start, new_start,
-                        new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
-                        &fence);
-        /* FIXME: handle copy error */
+        num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+        fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+        if (IS_ERR(fence))
+                return PTR_ERR(fence);
+
         r = ttm_bo_move_accel_cleanup(bo, &fence->base,
                                       evict, no_wait_gpu, new_mem);
         radeon_fence_unref(&fence);
...
@@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
         list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
         list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
         list[0].tv.bo = &vm->page_directory->tbo;
+        list[0].tv.shared = false;
         list[0].tiling_flags = 0;
         list[0].handle = 0;
         list_add(&list[0].tv.head, head);
@@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
                 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
                 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
                 list[idx].tv.bo = &list[idx].robj->tbo;
+                list[idx].tv.shared = false;
                 list[idx].tiling_flags = 0;
                 list[idx].handle = 0;
                 list_add(&list[idx++].tv.head, head);
@@ -395,6 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
         memset(&tv, 0, sizeof(tv));
         tv.bo = &bo->tbo;
+        tv.shared = false;
         INIT_LIST_HEAD(&head);
         list_add(&tv.head, &head);
@@ -693,15 +696,10 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                 incr, R600_PTE_VALID);
         if (ib.length_dw != 0) {
-                struct fence *fence;
                 radeon_asic_vm_pad_ib(rdev, &ib);
-                fence = reservation_object_get_excl(pd->tbo.resv);
-                radeon_semaphore_sync_to(ib.semaphore,
-                                         (struct radeon_fence *)fence);
-                radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+                radeon_semaphore_sync_resv(ib.semaphore, pd->tbo.resv, false);
+                radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
                 WARN_ON(ib.length_dw > ndw);
                 r = radeon_ib_schedule(rdev, &ib, NULL, false);
                 if (r) {
@@ -826,11 +824,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
                 unsigned nptes;
                 uint64_t pte;
-                struct fence *fence;
-                fence = reservation_object_get_excl(pt->tbo.resv);
-                radeon_semaphore_sync_to(ib->semaphore,
-                                         (struct radeon_fence *)fence);
+                radeon_semaphore_sync_resv(ib->semaphore, pt->tbo.resv, false);
                 if ((addr & ~mask) == (end & ~mask))
                         nptes = end - addr;
@@ -972,7 +967,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
         radeon_asic_vm_pad_ib(rdev, &ib);
         WARN_ON(ib.length_dw > ndw);
-        radeon_semaphore_sync_to(ib.semaphore, vm->fence);
+        radeon_semaphore_sync_fence(ib.semaphore, vm->fence);
         r = radeon_ib_schedule(rdev, &ib, NULL, false);
         if (r) {
                 radeon_ib_free(rdev, &ib);
...
@@ -33,18 +33,19 @@
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
  *
  * Copy GPU paging using the DMA engine (r7xx).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int rv770_copy_dma(struct radeon_device *rdev,
-                   uint64_t src_offset, uint64_t dst_offset,
-                   unsigned num_gpu_pages,
-                   struct radeon_fence **fence)
+struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
+                                    uint64_t src_offset, uint64_t dst_offset,
+                                    unsigned num_gpu_pages,
+                                    struct reservation_object *resv)
 {
         struct radeon_semaphore *sem = NULL;
+        struct radeon_fence *fence;
         int ring_index = rdev->asic->copy.dma_ring_index;
         struct radeon_ring *ring = &rdev->ring[ring_index];
         u32 size_in_dw, cur_size_in_dw;
@@ -54,7 +55,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
         r = radeon_semaphore_create(rdev, &sem);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }
         size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
@@ -63,10 +64,10 @@ int rv770_copy_dma(struct radeon_device *rdev,
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
-        radeon_semaphore_sync_to(sem, *fence);
+        radeon_semaphore_sync_resv(sem, resv, false);
         radeon_semaphore_sync_rings(rdev, sem, ring->idx);
         for (i = 0; i < num_loops; i++) {
@@ -83,15 +84,15 @@ int rv770_copy_dma(struct radeon_device *rdev,
                 dst_offset += cur_size_in_dw * 4;
         }
-        r = radeon_fence_emit(rdev, fence, ring->idx);
+        r = radeon_fence_emit(rdev, &fence, ring->idx);
         if (r) {
                 radeon_ring_unlock_undo(rdev, ring);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        radeon_semaphore_free(rdev, &sem, *fence);
-        return r;
+        radeon_semaphore_free(rdev, &sem, fence);
+        return fence;
 }
@@ -218,18 +218,19 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
  * @src_offset: src GPU address
  * @dst_offset: dst GPU address
  * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
+ * @resv: reservation object to sync to
  *
 * Copy GPU paging using the DMA engine (SI).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int si_copy_dma(struct radeon_device *rdev,
-                uint64_t src_offset, uint64_t dst_offset,
-                unsigned num_gpu_pages,
-                struct radeon_fence **fence)
+struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
+                                 uint64_t src_offset, uint64_t dst_offset,
+                                 unsigned num_gpu_pages,
+                                 struct reservation_object *resv)
 {
         struct radeon_semaphore *sem = NULL;
+        struct radeon_fence *fence;
         int ring_index = rdev->asic->copy.dma_ring_index;
         struct radeon_ring *ring = &rdev->ring[ring_index];
         u32 size_in_bytes, cur_size_in_bytes;
@@ -239,7 +240,7 @@ int si_copy_dma(struct radeon_device *rdev,
         r = radeon_semaphore_create(rdev, &sem);
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
+                return ERR_PTR(r);
         }
         size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -248,10 +249,10 @@ int si_copy_dma(struct radeon_device *rdev,
         if (r) {
                 DRM_ERROR("radeon: moving bo (%d).\n", r);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
-        radeon_semaphore_sync_to(sem, *fence);
+        radeon_semaphore_sync_resv(sem, resv, false);
         radeon_semaphore_sync_rings(rdev, sem, ring->idx);
         for (i = 0; i < num_loops; i++) {
@@ -268,16 +269,16 @@ int si_copy_dma(struct radeon_device *rdev,
                 dst_offset += cur_size_in_bytes;
         }
-        r = radeon_fence_emit(rdev, fence, ring->idx);
+        r = radeon_fence_emit(rdev, &fence, ring->idx);
         if (r) {
                 radeon_ring_unlock_undo(rdev, ring);
                 radeon_semaphore_free(rdev, &sem, NULL);
-                return r;
+                return ERR_PTR(r);
         }
         radeon_ring_unlock_commit(rdev, ring, false);
-        radeon_semaphore_free(rdev, &sem, *fence);
-        return r;
+        radeon_semaphore_free(rdev, &sem, fence);
+        return fence;
 }
@@ -119,8 +119,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                         ret = -EBUSY;
                 }
-                if (!ret)
-                        continue;
+                if (!ret) {
+                        if (!entry->shared)
+                                continue;
+                        ret = reservation_object_reserve_shared(bo->resv);
+                        if (!ret)
+                                continue;
+                }
                 /* uh oh, we lost out, drop every reservation and try
                  * to only reserve this buffer, then start over if
@@ -136,6 +142,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                         ret = 0;
                 }
+                if (!ret && entry->shared)
+                        ret = reservation_object_reserve_shared(bo->resv);
                 if (unlikely(ret != 0)) {
                         if (ret == -EINTR)
                                 ret = -ERESTARTSYS;
@@ -183,7 +192,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
         list_for_each_entry(entry, list, head) {
                 bo = entry->bo;
-                reservation_object_add_excl_fence(bo->resv, fence);
+                if (entry->shared)
+                        reservation_object_add_shared_fence(bo->resv, fence);
+                else
+                        reservation_object_add_excl_fence(bo->resv, fence);
                 ttm_bo_add_to_lru(bo);
                 __ttm_bo_unreserve(bo);
         }
...
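With the new shared flag on struct ttm_validate_buffer, a driver decides per buffer whether the submission fence should be added as a shared or as the exclusive fence of the reservation object. A minimal, hedged sketch of how a driver might use it, assuming the TTM execbuf helpers as they appear in this series; the function, variable names and simplified error handling are illustrative, not taken from the patch:

/* Hedged sketch: read-only buffers get the fence added as shared,
 * written buffers keep the exclusive fence. */
static int submit_with_bo(struct ttm_buffer_object *bo, bool write_access,
                          struct fence *submit_fence)
{
        struct ttm_validate_buffer entry;
        struct ww_acquire_ctx ticket;
        LIST_HEAD(validate_list);
        int ret;

        entry.bo = bo;
        /* readers can run concurrently, writers still need the exclusive slot */
        entry.shared = !write_access;
        list_add_tail(&entry.head, &validate_list);

        /* for shared entries this also reserves a shared fence slot via
         * reservation_object_reserve_shared() */
        ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true);
        if (ret)
                return ret;

        /* ... build and submit the command stream, obtain submit_fence ... */

        /* adds the fence as shared or exclusive depending on entry.shared */
        ttm_eu_fence_buffer_objects(&ticket, &validate_list, submit_fence);
        return 0;
}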
@@ -346,6 +346,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                 ++sw_context->cur_val_buf;
                 val_buf = &vval_buf->base;
                 val_buf->bo = ttm_bo_reference(bo);
+                val_buf->shared = false;
                 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                 vval_buf->validate_as_mob = validate_as_mob;
         }
@@ -2670,9 +2671,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
         INIT_LIST_HEAD(&validate_list);
         pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+        pinned_val.shared = false;
         list_add_tail(&pinned_val.head, &validate_list);
         query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+        query_val.shared = false;
         list_add_tail(&query_val.head, &validate_list);
         ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
...
@@ -133,6 +133,7 @@ static void vmw_resource_release(struct kref *kref)
                 struct ttm_validate_buffer val_buf;
                 val_buf.bo = bo;
+                val_buf.shared = false;
                 res->func->unbind(res, false, &val_buf);
         }
         res->backup_dirty = false;
@@ -1219,6 +1220,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
         INIT_LIST_HEAD(&val_list);
         val_buf->bo = ttm_bo_reference(&res->backup->base);
+        val_buf->shared = false;
         list_add_tail(&val_buf->head, &val_list);
         ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
         if (unlikely(ret != 0))
@@ -1312,6 +1314,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
         BUG_ON(!func->may_evict);
         val_buf.bo = NULL;
+        val_buf.shared = false;
         ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
         if (unlikely(ret != 0))
                 return ret;
@@ -1357,6 +1360,7 @@ int vmw_resource_validate(struct vmw_resource *res)
                 return 0;
         val_buf.bo = NULL;
+        val_buf.shared = false;
         if (res->backup)
                 val_buf.bo = &res->backup->base;
         do {
@@ -1474,6 +1478,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
         struct ttm_validate_buffer val_buf;
         val_buf.bo = bo;
+        val_buf.shared = false;
         list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
...
@@ -39,11 +39,13 @@
  *
  * @head: list head for thread-private list.
  * @bo: refcounted buffer object pointer.
+ * @shared: should the fence be added shared?
  */
 struct ttm_validate_buffer {
         struct list_head head;
         struct ttm_buffer_object *bo;
+        bool shared;
 };
 /**
...