Commit fc9c8f54 authored by Christian König, committed by Alex Deucher

drm/amdgpu: add vm_needs_flush parameter to amdgpu_copy_buffer

This allows us to flush the system VM here.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
parent df264f9e
...@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, ...@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
false); false, false);
if (r) if (r)
goto exit_do_move; goto exit_do_move;
r = dma_fence_wait(fence, false); r = dma_fence_wait(fence, false);
......
...@@ -535,7 +535,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, ...@@ -535,7 +535,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr, r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
amdgpu_bo_size(bo), resv, fence, amdgpu_bo_size(bo), resv, fence,
direct); direct, false);
if (!r) if (!r)
amdgpu_bo_fence(bo, *fence, true); amdgpu_bo_fence(bo, *fence, true);
...@@ -588,7 +588,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, ...@@ -588,7 +588,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr, r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
amdgpu_bo_size(bo), resv, fence, amdgpu_bo_size(bo), resv, fence,
direct); direct, false);
if (!r) if (!r)
amdgpu_bo_fence(bo, *fence, true); amdgpu_bo_fence(bo, *fence, true);
......
...@@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]); amdgpu_bo_kunmap(gtt_obj[i]);
r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
size, NULL, &fence, false); size, NULL, &fence, false, false);
if (r) { if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i); DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
...@@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) ...@@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj); amdgpu_bo_kunmap(vram_obj);
r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
size, NULL, &fence, false); size, NULL, &fence, false, false);
if (r) { if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i); DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
......
...@@ -318,7 +318,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, ...@@ -318,7 +318,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_copy_buffer(ring, old_start, new_start, r = amdgpu_copy_buffer(ring, old_start, new_start,
cur_pages * PAGE_SIZE, cur_pages * PAGE_SIZE,
bo->resv, &next, false); bo->resv, &next, false, false);
if (r) if (r)
goto error; goto error;
...@@ -1256,12 +1256,11 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) ...@@ -1256,12 +1256,11 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &adev->mman.bdev); return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
} }
int amdgpu_copy_buffer(struct amdgpu_ring *ring, int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv, struct reservation_object *resv,
struct dma_fence **fence, bool direct_submit) struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job; struct amdgpu_job *job;
...@@ -1283,6 +1282,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, ...@@ -1283,6 +1282,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (r) if (r)
return r; return r;
job->vm_needs_flush = vm_needs_flush;
if (resv) { if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv, r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED); AMDGPU_FENCE_OWNER_UNDEFINED);
......
...@@ -61,12 +61,11 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, ...@@ -61,12 +61,11 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
const struct ttm_place *place, const struct ttm_place *place,
struct ttm_mem_reg *mem); struct ttm_mem_reg *mem);
int amdgpu_copy_buffer(struct amdgpu_ring *ring, int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count,
uint64_t dst_offset,
uint32_t byte_count,
struct reservation_object *resv, struct reservation_object *resv,
struct dma_fence **fence, bool direct_submit); struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush);
int amdgpu_fill_buffer(struct amdgpu_bo *bo, int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data, uint32_t src_data,
struct reservation_object *resv, struct reservation_object *resv,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment