Commit 20f4eff1 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: sync bo and shadow V3

Use the shadow flag to determine in which direction to sync.
V2:
BO pinning is not needed, so remove it.

V3:
1. Split into two functions: amdgpu_bo_backup_to_shadow and
amdgpu_bo_restore_from_shadow.
2. Clean up the previous shadow direction definitions.
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e24db985
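
For illustration only, not part of this patch: a minimal sketch of how a caller might round-trip a buffer object through its shadow with the two new helpers, for example to preserve contents across a GPU reset. The wrapper example_shadow_roundtrip() and its error handling are hypothetical; it assumes adev, ring and bo are already set up, that bo has a shadow and is reserved by the caller, and it passes false for "direct" to go through normal job submission in amdgpu_copy_buffer().

/* Hypothetical caller sketch (not part of this patch).  Assumes bo
 * was created with a shadow and is reserved by the caller.
 */
static int example_shadow_roundtrip(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring,
				    struct amdgpu_bo *bo)
{
	struct fence *fence = NULL;
	int r;

	/* copy the BO's current contents into its shadow */
	r = amdgpu_bo_backup_to_shadow(adev, ring, bo, bo->tbo.resv,
				       &fence, false);
	if (r)
		return r;
	/* wait for the copy to finish (error handling elided) */
	fence_wait(fence, false);
	fence_put(fence);
	fence = NULL;

	/* ... BO contents lost, e.g. after a GPU reset ... */

	/* copy the shadow back into the (re-validated) BO */
	r = amdgpu_bo_restore_from_shadow(adev, ring, bo, bo->tbo.resv,
					  &fence, false);
	if (!r) {
		fence_wait(fence, false);
		fence_put(fence);
	}
	return r;
}
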
@@ -438,12 +438,6 @@ struct amdgpu_bo_va {
 #define AMDGPU_GEM_DOMAIN_MAX		0x3
 
-enum amdgpu_bo_shadow {
-	AMDGPU_BO_SHADOW_TO_NONE = 0,
-	AMDGPU_BO_SHADOW_TO_PARENT,
-	AMDGPU_BO_SHADOW_TO_SHADOW,
-};
-
 struct amdgpu_bo {
 	/* Protected by gem.mutex */
 	struct list_head		list;
@@ -470,8 +464,6 @@ struct amdgpu_bo {
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
 	struct amdgpu_bo		*shadow;
-	/* indicate if need to sync between bo and shadow */
-	enum amdgpu_bo_shadow		backup_shadow;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	struct amdgpu_mn		*mn;
@@ -445,6 +445,70 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	return r;
 }
 
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct reservation_object *resv,
+			       struct fence **fence,
+			       bool direct)
+{
+	struct amdgpu_bo *shadow = bo->shadow;
+	uint64_t bo_addr, shadow_addr;
+	int r;
+
+	if (!shadow)
+		return -EINVAL;
+
+	bo_addr = amdgpu_bo_gpu_offset(bo);
+	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto err;
+
+	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+			       amdgpu_bo_size(bo), resv, fence,
+			       direct);
+	if (!r)
+		amdgpu_bo_fence(bo, *fence, true);
+
+err:
+	return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  struct amdgpu_bo *bo,
+				  struct reservation_object *resv,
+				  struct fence **fence,
+				  bool direct)
+{
+	struct amdgpu_bo *shadow = bo->shadow;
+	uint64_t bo_addr, shadow_addr;
+	int r;
+
+	if (!shadow)
+		return -EINVAL;
+
+	bo_addr = amdgpu_bo_gpu_offset(bo);
+	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto err;
+
+	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+			       amdgpu_bo_size(bo), resv, fence,
+			       direct);
+	if (!r)
+		amdgpu_bo_fence(bo, *fence, true);
+
+err:
+	return r;
+}
+
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
 	bool is_iomem;
@@ -155,6 +155,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct reservation_object *resv,
+			       struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  struct amdgpu_bo *bo,
+				  struct reservation_object *resv,
+				  struct fence **fence,
+				  bool direct);
 
 /*
  * sub allocation