Commit 9cca0b8e authored by Christian König and committed by Alex Deucher

drm/amdgpu: move amdgpu_cs_sysvm_access_required into find_mapping

When we need to find the mapping, we need sysvm access anyway.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f5683f8b
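The patch folds the old amdgpu_cs_sysvm_access_required() helper into amdgpu_cs_find_mapping() itself, so every successful lookup also binds the BO into the system VM (and forces it VRAM-contiguous) on the spot. A rough before/after sketch of the caller pattern, with illustrative variable names matching the UVD/VCE parsers in the diff below:

	/* before: look up the mapping, and separately make the list BOs sysvm-accessible */
	mapping = amdgpu_cs_find_mapping(parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;
	...
	r = amdgpu_cs_sysvm_access_required(parser);
	if (r)
		return r;

	/* after: a single call both finds the mapping and makes the BO accessible */
	r = amdgpu_cs_find_mapping(parser, addr, &bo, &mapping);
	if (r)
		return r;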
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -179,6 +179,7 @@ struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 struct amdgpu_mn;
+struct amdgpu_bo_va_mapping;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -1900,10 +1901,9 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
 #endif
 
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
-		       uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+			   uint64_t addr, struct amdgpu_bo **bo,
+			   struct amdgpu_bo_va_mapping **mapping);
 
 #include "amdgpu_object.h"
 #endif
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -921,11 +921,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			uint64_t offset;
 			uint8_t *kptr;
 
-			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
-						   &aobj);
-			if (!aobj) {
+			r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
+						   &aobj, &m);
+			if (r) {
 				DRM_ERROR("IB va_start is invalid\n");
-				return -EINVAL;
+				return r;
 			}
 
 			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
@@ -1475,15 +1475,16 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
  * virtual memory address. Returns allocation structure when found, NULL
  * otherwise.
  */
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
-		       uint64_t addr, struct amdgpu_bo **bo)
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+			   uint64_t addr, struct amdgpu_bo **bo,
+			   struct amdgpu_bo_va_mapping **map)
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	unsigned i;
+	int r;
 
 	if (!parser->bo_list)
-		return NULL;
+		return 0;
 
 	addr /= AMDGPU_GPU_PAGE_SIZE;
 
@@ -1500,7 +1501,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 				continue;
 
 			*bo = lobj->bo_va->base.bo;
-			return mapping;
+			*map = mapping;
+			goto found;
 		}
 
 		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
@@ -1509,44 +1511,22 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 				continue;
 
 			*bo = lobj->bo_va->base.bo;
-			return mapping;
+			*map = mapping;
+			goto found;
 		}
 	}
 
-	return NULL;
-}
+	return -EINVAL;
 
-/**
- * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
- *
- * @parser: command submission parser context
- *
- * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
- */
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
-{
-	unsigned i;
-	int r;
-
-	if (!parser->bo_list)
-		return 0;
-
-	for (i = 0; i < parser->bo_list->num_entries; i++) {
-		struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
-
-		r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
-		if (unlikely(r))
-			return r;
-
-		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
-			continue;
-
-		bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-		amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-		if (unlikely(r))
-			return r;
-	}
-
-	return 0;
+found:
+	r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
+	if (unlikely(r))
+		return r;
+
+	if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+		return 0;
+
+	(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+	return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -410,10 +410,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
 	int r = 0;
 
-	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+	if (r) {
 		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
-		return -EINVAL;
+		return r;
 	}
 
 	if (!ctx->parser->adev->uvd.address_64_bit) {
@@ -737,10 +737,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
 	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
 	int r;
 
-	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+	if (r) {
 		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
-		return -EINVAL;
+		return r;
 	}
 
 	start = amdgpu_bo_gpu_offset(bo);
@@ -917,10 +917,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 		return -EINVAL;
 	}
 
-	r = amdgpu_cs_sysvm_access_required(parser);
-	if (r)
-		return r;
-
 	ctx.parser = parser;
 	ctx.buf_sizes = buf_sizes;
 	ctx.ib_idx = ib_idx;
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_bo *bo;
 	uint64_t addr;
+	int r;
 
 	if (index == 0xffffffff)
 		index = 0;
@@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
 	addr += ((uint64_t)size) * ((uint64_t)index);
 
-	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+	if (r) {
 		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
 			  addr, lo, hi, size, index);
-		return -EINVAL;
+		return r;
 	}
 
 	if ((addr + (uint64_t)size) >
@@ -652,10 +653,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 	p->job->vm = NULL;
 	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 
-	r = amdgpu_cs_sysvm_access_required(p);
-	if (r)
-		return r;
-
 	while (idx < ib->length_dw) {
 		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
 		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
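For reference, the consolidated function after this patch reads roughly as follows. This is a sketch reassembled from the hunks above: the loop header, the bo_list entry lookup and the address-range check come from context outside the hunks and are approximations, not literal patch content.

int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i;
	int r;

	if (!parser->bo_list)
		return 0;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo_list_entry *lobj = &parser->bo_list->array[i];

		if (!lobj->bo_va)
			continue;

		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
			/* range check approximated from the surrounding context */
			if (mapping->start > addr || mapping->last < addr)
				continue;

			*bo = lobj->bo_va->base.bo;
			*map = mapping;
			goto found;
		}

		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
			if (mapping->start > addr || mapping->last < addr)
				continue;

			*bo = lobj->bo_va->base.bo;
			*map = mapping;
			goto found;
		}
	}

	return -EINVAL;

found:
	/* the part folded in from the removed amdgpu_cs_sysvm_access_required() */
	r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
	if (unlikely(r))
		return r;

	if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
		return 0;

	(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
	return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
}

Callers that only need the lookup now get the bind/validate as a side effect, which is the point of the patch: anyone asking for a mapping in the UVD/VCE VM-emulation paths needs sysvm access anyway.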