Commit 3216c6b7 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: use amdgpu_bo_param for amdgpu_bo_create v2

With this change, new parameters can be added easily when needed.

v2:
a) rebase.
b) Zero-initialize struct amdgpu_bo_param; a future new member may
only be used in some cases, but every member should still have a
defined initial value.
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com> (v1)
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Cc: christian.koenig@amd.com
Cc: Felix.Kuehling@amd.com
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a906dbb1
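
For reference, a minimal sketch of the calling convention this patch introduces: callers fill a zero-initialized struct amdgpu_bo_param and pass it to the reworked amdgpu_bo_create(). The helper name below is only illustrative; the parameter values are modeled on the alloc_gtt_mem() hunk further down.

/* Illustrative only: zero the param struct, set the fields the caller
 * cares about, then call the reworked amdgpu_bo_create(adev, &bp, &bo). */
static int example_alloc_gtt_bo(struct amdgpu_device *adev, unsigned long size,
                                struct amdgpu_bo **bo_ptr)
{
        struct amdgpu_bo_param bp;

        memset(&bp, 0, sizeof(bp));     /* unset members keep a defined (zero) value */
        bp.size = size;
        bp.byte_align = PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_GTT;
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;

        return amdgpu_bo_create(adev, &bp, bo_ptr);
}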
@@ -217,13 +217,19 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
+       struct amdgpu_bo_param bp;
        int r;
        uint64_t gpu_addr_tmp = 0;
        void *cpu_ptr_tmp = NULL;

-       r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
-                            AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
-                            NULL, &bo);
+       memset(&bp, 0, sizeof(bp));
+       bp.size = size;
+       bp.byte_align = PAGE_SIZE;
+       bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+       bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+       bp.type = ttm_bo_type_kernel;
+       bp.resv = NULL;
+       r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                dev_err(adev->dev,
                        "failed to allocate BO for amdkfd (%d)\n", r);
@@ -1004,6 +1004,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        struct amdgpu_bo *bo;
+       struct amdgpu_bo_param bp;
        int byte_align;
        u32 alloc_domain;
        u64 alloc_flags;
@@ -1069,8 +1070,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
                        va, size, domain_string(alloc_domain));

-       ret = amdgpu_bo_create(adev, size, byte_align,
-                       alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
+       memset(&bp, 0, sizeof(bp));
+       bp.size = size;
+       bp.byte_align = byte_align;
+       bp.domain = alloc_domain;
+       bp.flags = alloc_flags;
+       bp.type = ttm_bo_type_device;
+       bp.resv = NULL;
+       ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret) {
                pr_debug("Failed to create BO on domain %s. ret %d\n",
                                domain_string(alloc_domain), ret);
@@ -75,13 +75,20 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 {
        struct amdgpu_bo *dobj = NULL;
        struct amdgpu_bo *sobj = NULL;
+       struct amdgpu_bo_param bp;
        uint64_t saddr, daddr;
        int r, n;
        int time;

+       memset(&bp, 0, sizeof(bp));
+       bp.size = size;
+       bp.byte_align = PAGE_SIZE;
+       bp.domain = sdomain;
+       bp.flags = 0;
+       bp.type = ttm_bo_type_kernel;
+       bp.resv = NULL;
        n = AMDGPU_BENCHMARK_ITERATIONS;
-       r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0,
-                            ttm_bo_type_kernel, NULL, &sobj);
+       r = amdgpu_bo_create(adev, &bp, &sobj);
        if (r) {
                goto out_cleanup;
        }
@@ -93,8 +100,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
        if (r) {
                goto out_cleanup;
        }
-       r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
-                            ttm_bo_type_kernel, NULL, &dobj);
+       bp.domain = ddomain;
+       r = amdgpu_bo_create(adev, &bp, &dobj);
        if (r) {
                goto out_cleanup;
        }
@@ -113,12 +113,17 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
        int r;

        if (adev->gart.robj == NULL) {
-               r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
-                                    AMDGPU_GEM_DOMAIN_VRAM,
-                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-                                    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-                                    ttm_bo_type_kernel, NULL,
-                                    &adev->gart.robj);
+               struct amdgpu_bo_param bp;
+
+               memset(&bp, 0, sizeof(bp));
+               bp.size = adev->gart.table_size;
+               bp.byte_align = PAGE_SIZE;
+               bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+               bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+                       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+               bp.type = ttm_bo_type_kernel;
+               bp.resv = NULL;
+               r = amdgpu_bo_create(adev, &bp, &adev->gart.robj);
                if (r) {
                        return r;
                }
@@ -48,17 +48,24 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             struct drm_gem_object **obj)
 {
        struct amdgpu_bo *bo;
+       struct amdgpu_bo_param bp;
        int r;

+       memset(&bp, 0, sizeof(bp));
        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

+       bp.size = size;
+       bp.byte_align = alignment;
+       bp.type = type;
+       bp.resv = resv;
 retry:
-       r = amdgpu_bo_create(adev, size, alignment, initial_domain,
-                            flags, type, resv, &bo);
+       bp.flags = flags;
+       bp.domain = initial_domain;
+       r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
@@ -191,14 +191,21 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr)
 {
+       struct amdgpu_bo_param bp;
        bool free = false;
        int r;

+       memset(&bp, 0, sizeof(bp));
+       bp.size = size;
+       bp.byte_align = align;
+       bp.domain = domain;
+       bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+               AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+       bp.type = ttm_bo_type_kernel;
+       bp.resv = NULL;
+
        if (!*bo_ptr) {
-               r = amdgpu_bo_create(adev, size, align, domain,
-                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-                                    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-                                    ttm_bo_type_kernel, NULL, bo_ptr);
+               r = amdgpu_bo_create(adev, &bp, bo_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
                                r);
@@ -470,20 +477,21 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                                   unsigned long size, int byte_align,
                                   struct amdgpu_bo *bo)
 {
-       struct amdgpu_bo_param bp = {
-               .size = size,
-               .byte_align = byte_align,
-               .domain = AMDGPU_GEM_DOMAIN_GTT,
-               .flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-                       AMDGPU_GEM_CREATE_SHADOW,
-               .type = ttm_bo_type_kernel,
-               .resv = bo->tbo.resv
-       };
+       struct amdgpu_bo_param bp;
        int r;

        if (bo->shadow)
                return 0;

+       memset(&bp, 0, sizeof(bp));
+       bp.size = size;
+       bp.byte_align = byte_align;
+       bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+       bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+               AMDGPU_GEM_CREATE_SHADOW;
+       bp.type = ttm_bo_type_kernel;
+       bp.resv = bo->tbo.resv;
+
        r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
@@ -495,34 +503,26 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
        return r;
 }

-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
-                    int byte_align, u32 domain,
-                    u64 flags, enum ttm_bo_type type,
-                    struct reservation_object *resv,
+int amdgpu_bo_create(struct amdgpu_device *adev,
+                    struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr)
 {
-       struct amdgpu_bo_param bp = {
-               .size = size,
-               .byte_align = byte_align,
-               .domain = domain,
-               .flags = flags & ~AMDGPU_GEM_CREATE_SHADOW,
-               .type = type,
-               .resv = resv
-       };
+       u64 flags = bp->flags;
        int r;

-       r = amdgpu_bo_do_create(adev, &bp, bo_ptr);
+       bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+       r = amdgpu_bo_do_create(adev, bp, bo_ptr);
        if (r)
                return r;

        if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
-               if (!resv)
+               if (!bp->resv)
                        WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
                                                        NULL));

-               r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
+               r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr));

-               if (!resv)
+               if (!bp->resv)
                        reservation_object_unlock((*bo_ptr)->tbo.resv);

                if (r)
@@ -233,10 +233,8 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
        return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }

-int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
-                    int byte_align, u32 domain,
-                    u64 flags, enum ttm_bo_type type,
-                    struct reservation_object *resv,
+int amdgpu_bo_create(struct amdgpu_device *adev,
+                    struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
@@ -102,12 +102,18 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
        struct reservation_object *resv = attach->dmabuf->resv;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_bo *bo;
+       struct amdgpu_bo_param bp;
        int ret;

+       memset(&bp, 0, sizeof(bp));
+       bp.size = attach->dmabuf->size;
+       bp.byte_align = PAGE_SIZE;
+       bp.domain = AMDGPU_GEM_DOMAIN_CPU;
+       bp.flags = 0;
+       bp.type = ttm_bo_type_sg;
+       bp.resv = resv;
        ww_mutex_lock(&resv->lock, NULL);
-       ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
-                              AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
-                              resv, &bo);
+       ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret)
                goto error;
@@ -33,6 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct amdgpu_bo *vram_obj = NULL;
        struct amdgpu_bo **gtt_obj = NULL;
+       struct amdgpu_bo_param bp;
        uint64_t gart_addr, vram_addr;
        unsigned n, size;
        int i, r;
@@ -58,9 +59,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                r = 1;
                goto out_cleanup;
        }

-       r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
-                            ttm_bo_type_kernel, NULL, &vram_obj);
+       memset(&bp, 0, sizeof(bp));
+       bp.size = size;
+       bp.byte_align = PAGE_SIZE;
+       bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+       bp.flags = 0;
+       bp.type = ttm_bo_type_kernel;
+       bp.resv = NULL;
+       r = amdgpu_bo_create(adev, &bp, &vram_obj);
        if (r) {
                DRM_ERROR("Failed to create VRAM object\n");
                goto out_cleanup;
@@ -79,9 +86,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                void **vram_start, **vram_end;
                struct dma_fence *fence = NULL;

-               r = amdgpu_bo_create(adev, size, PAGE_SIZE,
-                                    AMDGPU_GEM_DOMAIN_GTT, 0,
-                                    ttm_bo_type_kernel, NULL, gtt_obj + i);
+               bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+               r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
                        goto out_lclean;
@@ -1316,6 +1316,7 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 {
        struct ttm_operation_ctx ctx = { false, false };
+       struct amdgpu_bo_param bp;
        int r = 0;
        int i;
        u64 vram_size = adev->gmc.visible_vram_size;
@@ -1323,17 +1324,21 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
        u64 size = adev->fw_vram_usage.size;
        struct amdgpu_bo *bo;

+       memset(&bp, 0, sizeof(bp));
+       bp.size = adev->fw_vram_usage.size;
+       bp.byte_align = PAGE_SIZE;
+       bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+       bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+               AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+       bp.type = ttm_bo_type_kernel;
+       bp.resv = NULL;
        adev->fw_vram_usage.va = NULL;
        adev->fw_vram_usage.reserved_bo = NULL;

        if (adev->fw_vram_usage.size > 0 &&
                adev->fw_vram_usage.size <= vram_size) {

-               r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
-                                    AMDGPU_GEM_DOMAIN_VRAM,
-                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-                                    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-                                    ttm_bo_type_kernel, NULL,
+               r = amdgpu_bo_create(adev, &bp,
                                     &adev->fw_vram_usage.reserved_bo);
                if (r)
                        goto error_create;
@@ -412,11 +412,16 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                struct amdgpu_bo *pt;

                if (!entry->base.bo) {
-                       r = amdgpu_bo_create(adev,
-                                            amdgpu_vm_bo_size(adev, level),
-                                            AMDGPU_GPU_PAGE_SIZE,
-                                            AMDGPU_GEM_DOMAIN_VRAM, flags,
-                                            ttm_bo_type_kernel, resv, &pt);
+                       struct amdgpu_bo_param bp;
+
+                       memset(&bp, 0, sizeof(bp));
+                       bp.size = amdgpu_vm_bo_size(adev, level);
+                       bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+                       bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+                       bp.flags = flags;
+                       bp.type = ttm_bo_type_kernel;
+                       bp.resv = resv;
+                       r = amdgpu_bo_create(adev, &bp, &pt);
                        if (r)
                                return r;
@@ -2368,6 +2373,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   int vm_context, unsigned int pasid)
 {
+       struct amdgpu_bo_param bp;
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                                   AMDGPU_VM_PTE_COUNT(adev) * 8);
        unsigned ring_instance;
@@ -2422,8 +2428,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                flags |= AMDGPU_GEM_CREATE_SHADOW;

        size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
-       r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
-                            ttm_bo_type_kernel, NULL, &vm->root.base.bo);
+       memset(&bp, 0, sizeof(bp));
+       bp.size = size;
+       bp.byte_align = align;
+       bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+       bp.flags = flags;
+       bp.type = ttm_bo_type_kernel;
+       bp.resv = NULL;
+       r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo);
        if (r)
                goto error_free_sched_entity;