Commit 19be5570 authored by Christian König, committed by Alex Deucher

drm/ttm: add operation ctx to ttm_bo_validate v2

Give moving a BO into place an operation context to work with.

v2: rebased
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 750a2503
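
In short: ttm_bo_validate() loses its two loose boolean parameters and instead takes a pointer to the new struct ttm_operation_ctx, so that later patches can extend the context without touching every caller again. A minimal before/after sketch of a caller (illustrative only; the bo variable is a placeholder, not taken from one specific hunk below):

	/* Before this patch: behaviour flags passed as loose booleans. */
	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);

	/* After this patch: the same flags travel in an operation context.
	 * Positional initializer: interruptible = true, no_wait_gpu = false. */
	struct ttm_operation_ctx ctx = { true, false };

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);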
@@ -343,6 +343,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 				 struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { true, false };
 	u64 initial_bytes_moved, bytes_moved;
 	uint32_t domain;
 	int r;
@@ -374,7 +375,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
 	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
 		      initial_bytes_moved;
 	p->bytes_moved += bytes_moved;
@@ -396,6 +397,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 				struct amdgpu_bo *validated)
 {
 	uint32_t domain = validated->allowed_domains;
+	struct ttm_operation_ctx ctx = { true, false };
 	int r;
 	if (!p->evictable)
@@ -437,7 +439,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 			bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
 		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
 			      initial_bytes_moved;
 		p->bytes_moved += bytes_moved;
@@ -476,6 +478,7 @@ static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 				   struct list_head *validated)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct amdgpu_bo_list_entry *lobj;
 	int r;
@@ -493,8 +496,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		    lobj->user_pages) {
 			amdgpu_ttm_placement_from_domain(bo,
 							 AMDGPU_GEM_DOMAIN_CPU);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
-					    false);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			if (r)
 				return r;
 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
@@ -1575,6 +1577,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 			   struct amdgpu_bo_va_mapping **map)
 {
 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	int r;
@@ -1595,8 +1598,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
 		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
-		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
-				    false);
+		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
 		if (r)
 			return r;
 	}
...
@@ -282,6 +282,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *filp)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_amdgpu_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
@@ -335,7 +336,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			goto free_pages;
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		amdgpu_bo_unreserve(bo);
 		if (r)
 			goto free_pages;
...
@@ -552,6 +552,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	uint32_t domain;
 	int r;
@@ -562,7 +563,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 		domain = bo->allowed_domains;
 		goto retry;
@@ -673,6 +674,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 			     u64 *gpu_addr)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -723,7 +725,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r)) {
 		dev_err(adev->dev, "%p pin failed\n", bo);
 		goto error;
@@ -760,6 +762,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 	if (!bo->pin_count) {
@@ -773,7 +776,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r)) {
 		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
 		goto error;
@@ -945,6 +948,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_bo *abo;
 	unsigned long offset, size;
 	int r;
@@ -978,7 +982,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	abo->placement.num_busy_placement = 1;
 	abo->placement.busy_placement = &abo->placements[1];
-	r = ttm_bo_validate(bo, &abo->placement, false, false);
+	r = ttm_bo_validate(bo, &abo->placement, &ctx);
 	if (unlikely(r != 0))
 		return r;
...
@@ -408,6 +408,7 @@ static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
  */
 static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 {
+	struct ttm_operation_ctx tctx = { false, false };
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_bo *bo;
 	uint32_t cmd;
@@ -430,7 +431,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 		}
 		amdgpu_uvd_force_into_uvd_segment(bo);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
 	}
 	return r;
@@ -949,6 +950,7 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 			       bool direct, struct dma_fence **fence)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
@@ -975,7 +977,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		amdgpu_uvd_force_into_uvd_segment(bo);
 	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
 		goto err;
...
@@ -558,6 +558,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 				  int lo, int hi, unsigned size, int32_t index)
 {
 	int64_t offset = ((uint64_t)size) * ((int64_t)index);
+	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_bo_va_mapping *mapping;
 	unsigned i, fpfn, lpfn;
 	struct amdgpu_bo *bo;
@@ -587,7 +588,7 @@ static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 		bo->placements[i].lpfn = bo->placements[i].fpfn ?
 			min(bo->placements[i].fpfn, lpfn) : lpfn;
 	}
-	return ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 }
...
@@ -274,6 +274,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 				   bool direct, struct dma_fence **fence)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
@@ -294,7 +295,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		return r;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
 		goto err;
...
@@ -354,6 +354,7 @@ static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
 int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (bo->pin_count) {
@@ -365,7 +366,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	ast_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
@@ -377,6 +378,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 int ast_bo_unpin(struct ast_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -388,11 +390,12 @@ int ast_bo_unpin(struct ast_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 }
 int ast_bo_push_sysram(struct ast_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -409,7 +412,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
...
@@ -283,6 +283,7 @@ static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
 int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (bo->pin_count) {
@@ -295,7 +296,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	bochs_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
@@ -307,6 +308,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 int bochs_bo_unpin(struct bochs_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (!bo->pin_count) {
@@ -320,7 +322,7 @@ int bochs_bo_unpin(struct bochs_bo *bo)
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
...
@@ -358,6 +358,7 @@ static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (bo->pin_count) {
@@ -369,7 +370,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	cirrus_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
@@ -381,6 +382,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -397,7 +399,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
...
@@ -344,6 +344,7 @@ int hibmc_bo_create(struct drm_device *dev, int size, int align,
 int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (bo->pin_count) {
@@ -356,7 +357,7 @@ int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	hibmc_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
@@ -368,6 +369,7 @@ int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
 int hibmc_bo_unpin(struct hibmc_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (!bo->pin_count) {
@@ -380,7 +382,7 @@ int hibmc_bo_unpin(struct hibmc_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("validate failed for unpin: %d\n", ret);
 		return ret;
...
@@ -354,6 +354,7 @@ static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
 int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (bo->pin_count) {
@@ -366,7 +367,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	mgag200_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
@@ -378,6 +379,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 int mgag200_bo_unpin(struct mgag200_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -389,11 +391,12 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 }
 int mgag200_bo_push_sysram(struct mgag200_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -410,7 +413,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
...
@@ -548,10 +548,10 @@ int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 		    bool no_wait_gpu)
 {
+	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
 	int ret;
-	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-			      interruptible, no_wait_gpu);
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
 	if (ret)
 		return ret;
...
@@ -309,6 +309,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
 	int ret;
 	struct drm_gem_object *gobj = NULL;
 	struct qxl_bo *qobj = NULL;
+	struct ttm_operation_ctx ctx = { true, false };
 	if (update_area->left >= update_area->right ||
 	    update_area->top >= update_area->bottom)
@@ -326,8 +327,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
 	if (!qobj->pin_count) {
 		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
-		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-				      true, false);
+		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
 		if (unlikely(ret))
 			goto out;
 	}
...
@@ -223,6 +223,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
 static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct drm_device *ddev = bo->gem_base.dev;
 	int r;
@@ -233,7 +234,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 		return 0;
 	}
 	qxl_ttm_placement_from_domain(bo, domain, true);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -246,6 +247,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 static int __qxl_bo_unpin(struct qxl_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct drm_device *ddev = bo->gem_base.dev;
 	int r, i;
@@ -258,7 +260,7 @@ static int __qxl_bo_unpin(struct qxl_bo *bo)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r != 0))
 		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
 	return r;
...
@@ -230,12 +230,12 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 static int qxl_release_validate_bo(struct qxl_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	int ret;
 	if (!bo->pin_count) {
 		qxl_ttm_placement_from_domain(bo, bo->type, false);
-		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
-				      true, false);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		if (ret)
 			return ret;
 	}
...
@@ -285,6 +285,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *filp)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
@@ -343,7 +344,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	}
 	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	radeon_bo_unreserve(bo);
 	up_read(&current->mm->mmap_sem);
 	if (r)
...
@@ -124,6 +124,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 					     unsigned long end)
 {
 	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+	struct ttm_operation_ctx ctx = { false, false };
 	struct interval_tree_node *it;
 	/* notification is exclusive, but interval is inclusive */
@@ -157,7 +158,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		if (r)
 			DRM_ERROR("(%ld) failed to validate user bo\n", r);
...
@@ -329,6 +329,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 			     u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
@@ -371,7 +372,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -393,6 +394,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 int radeon_bo_unpin(struct radeon_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 	if (!bo->pin_count) {
@@ -406,7 +408,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
 		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
@@ -531,6 +533,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			    struct ww_acquire_ctx *ticket,
 			    struct list_head *head, int ring)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct radeon_bo_list *lobj;
 	struct list_head duplicates;
 	int r;
@@ -572,7 +575,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			radeon_uvd_force_into_uvd_segment(bo, allowed);
 			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
 				       initial_bytes_moved;
@@ -792,6 +795,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
 	unsigned long offset, size, lpfn;
@@ -823,10 +827,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
 			rbo->placements[i].lpfn = lpfn;
 	}
-	r = ttm_bo_validate(bo, &rbo->placement, false, false);
+	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
-		return ttm_bo_validate(bo, &rbo->placement, false, false);
+		return ttm_bo_validate(bo, &rbo->placement, &ctx);
 	} else if (unlikely(r != 0)) {
 		return r;
 	}
...
@@ -387,6 +387,7 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
 static int radeon_vm_clear_bo(struct radeon_device *rdev,
 			      struct radeon_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct radeon_ib ib;
 	unsigned entries;
 	uint64_t addr;
@@ -396,7 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	if (r)
 		return r;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
 		goto error_unreserve;
...
@@ -1091,9 +1091,8 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement,
 EXPORT_SYMBOL(ttm_bo_mem_compat);
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 		    struct ttm_placement *placement,
-		    bool interruptible,
-		    bool no_wait_gpu)
+		    struct ttm_operation_ctx *ctx)
 {
 	int ret;
 	uint32_t new_flags;
@@ -1103,8 +1102,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 * Check whether we need to move buffer.
 	 */
 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible,
-					 no_wait_gpu);
+		ret = ttm_bo_move_buffer(bo, placement, ctx->interruptible,
+					 ctx->no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1219,8 +1218,11 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		WARN_ON(!locked);
 	}
-	if (likely(!ret))
-		ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (likely(!ret)) {
+		struct ttm_operation_ctx ctx = { interruptible, false };
+
+		ret = ttm_bo_validate(bo, placement, &ctx);
+	}
 	if (unlikely(ret)) {
 		if (!resv)
...
@@ -56,6 +56,7 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
 					   struct list_head *head)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct ttm_validate_buffer *buf;
 	struct ttm_buffer_object *bo;
 	struct virtio_gpu_object *qobj;
@@ -68,7 +69,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(buf, head, head) {
 		bo = buf->bo;
 		qobj = container_of(bo, struct virtio_gpu_object, tbo);
-		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
+		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
 		if (ret) {
 			ttm_eu_backoff_reservation(ticket, head);
 			return ret;
@@ -352,6 +353,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_from_host *args = data;
+	struct ttm_operation_ctx ctx = { true, false };
 	struct drm_gem_object *gobj = NULL;
 	struct virtio_gpu_object *qobj = NULL;
 	struct virtio_gpu_fence *fence;
@@ -372,8 +374,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (ret)
 		goto out;
-	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-			      true, false);
+	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
 	if (unlikely(ret))
 		goto out_unres;
@@ -399,6 +400,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_to_host *args = data;
+	struct ttm_operation_ctx ctx = { true, false };
 	struct drm_gem_object *gobj = NULL;
 	struct virtio_gpu_object *qobj = NULL;
 	struct virtio_gpu_fence *fence;
@@ -416,8 +418,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
-	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-			      true, false);
+	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
 	if (unlikely(ret))
 		goto out_unres;
...
@@ -387,6 +387,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
  */
 static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_cotable *vcotbl = vmw_cotable(res);
 	struct vmw_dma_buffer *buf, *old_buf = res->backup;
@@ -455,7 +456,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	}
 	/* Unpin new buffer, and switch backup buffers. */
-	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+	ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed validating new COTable backup buffer.\n");
 		goto out_wait;
...
@@ -47,6 +47,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 				struct ttm_placement *placement,
 				bool interruptible)
 {
+	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 	uint32_t new_flags;
@@ -65,7 +66,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 		ret = ttm_bo_mem_compat(placement, &bo->mem,
 					&new_flags) == true ? 0 : -EINVAL;
 	else
-		ret = ttm_bo_validate(bo, placement, interruptible, false);
+		ret = ttm_bo_validate(bo, placement, &ctx);
 	if (!ret)
 		vmw_bo_pin_reserved(buf, true);
@@ -95,6 +96,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 				  struct vmw_dma_buffer *buf,
 				  bool interruptible)
 {
+	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 	uint32_t new_flags;
@@ -115,12 +117,11 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 		goto out_unreserve;
 	}
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
-			      false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
 	if (likely(ret == 0) || ret == -ERESTARTSYS)
 		goto out_unreserve;
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
 out_unreserve:
 	if (!ret)
@@ -170,6 +171,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
 				    struct vmw_dma_buffer *buf,
 				    bool interruptible)
 {
+	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement placement;
 	struct ttm_place place;
@@ -200,14 +202,16 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    bo->mem.start < bo->num_pages &&
 	    bo->mem.start > 0 &&
-	    buf->pin_count == 0)
-		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+	    buf->pin_count == 0) {
+		ctx.interruptible = false;
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+	}
 	if (buf->pin_count > 0)
 		ret = ttm_bo_mem_compat(&placement, &bo->mem,
 					&new_flags) == true ? 0 : -EINVAL;
 	else
-		ret = ttm_bo_validate(bo, &placement, interruptible, false);
+		ret = ttm_bo_validate(bo, &placement, &ctx);
 	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->offset != 0);
@@ -286,6 +290,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
  */
 void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 {
+	struct ttm_operation_ctx ctx = { false, true };
 	struct ttm_place pl;
 	struct ttm_placement placement;
 	struct ttm_buffer_object *bo = &vbo->base;
@@ -314,7 +319,7 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 	placement.num_placement = 1;
 	placement.placement = &pl;
-	ret = ttm_bo_validate(bo, &placement, false, true);
+	ret = ttm_bo_validate(bo, &placement, &ctx);
 	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
 }
...
@@ -3701,14 +3701,14 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
 	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
 						  base);
+	struct ttm_operation_ctx ctx = { interruptible, true };
 	int ret;
 	if (vbo->pin_count > 0)
 		return 0;
 	if (validate_as_mob)
-		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
-				       false);
+		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -3717,8 +3717,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * used as a GMR, this will return -ENOMEM.
 	 */
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
-			      false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
@@ -3727,7 +3726,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.
 	 */
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
 	return ret;
 }
...
@@ -968,6 +968,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 			  bool interruptible,
 			  struct ttm_validate_buffer *val_buf)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct list_head val_list;
 	bool backup_dirty = false;
 	int ret;
@@ -992,7 +993,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	backup_dirty = res->backup_dirty;
 	ret = ttm_bo_validate(&res->backup->base,
 			      res->func->backup_placement,
-			      true, false);
+			      &ctx);
 	if (unlikely(ret != 0))
 		goto out_no_validate;
@@ -1446,6 +1447,7 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
  */
 int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 {
+	struct ttm_operation_ctx ctx = { interruptible, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
@@ -1466,7 +1468,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 			ret = ttm_bo_validate
 				(&vbo->base,
 				 res->func->backup_placement,
-				 interruptible, false);
+				 &ctx);
 			if (ret) {
 				ttm_bo_unreserve(&vbo->base);
 				goto out_no_validate;
...
@@ -970,6 +970,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 			  size_t size,
 			  struct list_head *list)
 {
+	struct ttm_operation_ctx ctx = { false, true };
 	struct vmw_dma_buffer *buf;
 	struct ttm_bo_kmap_obj map;
 	bool is_iomem;
@@ -1005,7 +1006,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	WARN_ON(is_iomem);
 	ttm_bo_kunmap(&map);
-	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
 	WARN_ON(ret != 0);
 	ttm_bo_unreserve(&buf->base);
...
@@ -258,6 +258,20 @@ struct ttm_bo_kmap_obj {
 	struct ttm_buffer_object *bo;
 };
+/**
+ * struct ttm_operation_ctx
+ *
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Context for TTM operations like changing buffer placement or general memory
+ * allocation.
+ */
+struct ttm_operation_ctx {
+	bool interruptible;
+	bool no_wait_gpu;
+};
 /**
  * ttm_bo_reference - reference a struct ttm_buffer_object
  *
@@ -306,8 +320,7 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
  *
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
- * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @ctx: validation parameters.
  *
  * Changes placement and caching policy of the buffer object
  * according proposed placement.
@@ -319,8 +332,7 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
  */
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 		    struct ttm_placement *placement,
-		    bool interruptible,
-		    bool no_wait_gpu);
+		    struct ttm_operation_ctx *ctx);
 /**
  * ttm_bo_unref
...
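
Note that every initializer in this series is positional: { A, B } maps to .interruptible = A, .no_wait_gpu = B in declaration order. An equivalent, more self-documenting spelling uses C99 designated initializers (a sketch, not part of this patch; bo is again a placeholder):

	struct ttm_operation_ctx ctx = {
		.interruptible = true,	/* sleep interruptibly while waiting */
		.no_wait_gpu = false,	/* waiting for the GPU is allowed */
	};

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

Designated initializers would keep callers readable if the context grows more fields, which is the stated point of introducing the struct.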