Commit c81a1a74 authored by Michel Dänzer, committed by Alex Deucher

drm/amdgpu: Make amdgpu_bo_reserve use uninterruptible waits for cleanup

Some of these paths probably cannot be interrupted by a signal anyway.
Those that can would fail to clean up properly if they actually got
interrupted.
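
To illustrate the failure mode (a hypothetical sketch modeled on the call
sites below, not a verbatim excerpt): with an interruptible reserve, a
pending signal makes the reserve fail with -ERESTARTSYS, the cleanup under
it is silently skipped, and the buffer stays pinned.

	/* Hypothetical cleanup path: with the second argument false the
	 * reserve is interruptible, so a pending signal can make it return
	 * -ERESTARTSYS; the unpin below is then skipped and the BO leaks
	 * its pin. Passing true closes that window. */
	r = amdgpu_bo_reserve(bo, false);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(bo);
		amdgpu_bo_unreserve(bo);
	}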
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8b9242ed
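
For context, amdgpu_bo_reserve() is a thin wrapper around TTM's
ttm_bo_reserve(), and its second argument selects the wait mode; each hunk
below flips that argument from false (interruptible) to true
(uninterruptible) at a cleanup call site. A paraphrased sketch of the
helper's shape at the time (not the verbatim in-tree code, which also logs
unexpected reservation failures):

static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	/* ttm_bo_reserve(bo, interruptible, no_wait, ticket): passing
	 * no_intr == true requests an uninterruptible wait, which cannot
	 * fail with -ERESTARTSYS. */
	return ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
}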
@@ -123,7 +123,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	int r;
 
 	/* unpin of the old buffer */
-	r = amdgpu_bo_reserve(work->old_abo, false);
+	r = amdgpu_bo_reserve(work->old_abo, true);
 	if (likely(r == 0)) {
 		r = amdgpu_bo_unpin(work->old_abo);
 		if (unlikely(r != 0)) {
@@ -112,7 +112,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 	int ret;
 
-	ret = amdgpu_bo_reserve(abo, false);
+	ret = amdgpu_bo_reserve(abo, true);
 	if (likely(ret == 0)) {
 		amdgpu_bo_kunmap(abo);
 		amdgpu_bo_unpin(abo);
@@ -183,7 +183,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
 	if (adev->gart.robj == NULL) {
 		return;
 	}
-	r = amdgpu_bo_reserve(adev->gart.robj, false);
+	r = amdgpu_bo_reserve(adev->gart.robj, true);
 	if (likely(r == 0)) {
 		amdgpu_bo_kunmap(adev->gart.robj);
 		amdgpu_bo_unpin(adev->gart.robj);
@@ -821,7 +821,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	if (amdgpu_sriov_vf(adev)) {
 		/* TODO: how to handle reserve failure */
-		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, false));
+		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
 		amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
 		fpriv->vm.csa_bo_va = NULL;
 		amdgpu_bo_unreserve(adev->virt.csa_obj);
@@ -113,7 +113,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 	int ret = 0;
 
-	ret = amdgpu_bo_reserve(bo, false);
+	ret = amdgpu_bo_reserve(bo, true);
 	if (unlikely(ret != 0))
 		return;
@@ -130,7 +130,7 @@ int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 
-	r = amdgpu_bo_reserve(sa_manager->bo, false);
+	r = amdgpu_bo_reserve(sa_manager->bo, true);
 	if (!r) {
 		amdgpu_bo_kunmap(sa_manager->bo);
 		amdgpu_bo_unpin(sa_manager->bo);
@@ -1198,7 +1198,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 		return;
 	amdgpu_ttm_debugfs_fini(adev);
 	if (adev->stollen_vga_memory) {
-		r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+		r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
 		if (r == 0) {
 			amdgpu_bo_unpin(adev->stollen_vga_memory);
 			amdgpu_bo_unreserve(adev->stollen_vga_memory);
@@ -2230,7 +2230,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r != 0))
 			return r;
 		amdgpu_bo_unpin(abo);
@@ -2589,7 +2589,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 unpin:
 	if (amdgpu_crtc->cursor_bo) {
 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-		ret = amdgpu_bo_reserve(aobj, false);
+		ret = amdgpu_bo_reserve(aobj, true);
 		if (likely(ret == 0)) {
 			amdgpu_bo_unpin(aobj);
 			amdgpu_bo_unreserve(aobj);
@@ -2720,7 +2720,7 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
@@ -2214,7 +2214,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r != 0))
 			return r;
 		amdgpu_bo_unpin(abo);
@@ -2609,7 +2609,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 unpin:
 	if (amdgpu_crtc->cursor_bo) {
 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-		ret = amdgpu_bo_reserve(aobj, false);
+		ret = amdgpu_bo_reserve(aobj, true);
 		if (likely(ret == 0)) {
 			amdgpu_bo_unpin(aobj);
 			amdgpu_bo_unreserve(aobj);
@@ -2740,7 +2740,7 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
@@ -1645,7 +1645,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r != 0))
 			return r;
 		amdgpu_bo_unpin(abo);
@@ -1962,7 +1962,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 unpin:
 	if (amdgpu_crtc->cursor_bo) {
 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-		ret = amdgpu_bo_reserve(aobj, false);
+		ret = amdgpu_bo_reserve(aobj, true);
 		if (likely(ret == 0)) {
 			amdgpu_bo_unpin(aobj);
 			amdgpu_bo_unreserve(aobj);
@@ -2088,7 +2088,7 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
@@ -2089,7 +2089,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r != 0))
 			return r;
 		amdgpu_bo_unpin(abo);
@@ -2440,7 +2440,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 unpin:
 	if (amdgpu_crtc->cursor_bo) {
 		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-		ret = amdgpu_bo_reserve(aobj, false);
+		ret = amdgpu_bo_reserve(aobj, true);
 		if (likely(ret == 0)) {
 			amdgpu_bo_unpin(aobj);
 			amdgpu_bo_unreserve(aobj);
@@ -2571,7 +2571,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
@@ -248,7 +248,7 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
 		abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
-		r = amdgpu_bo_reserve(abo, false);
+		r = amdgpu_bo_reserve(abo, true);
 		if (unlikely(r))
 			DRM_ERROR("failed to reserve abo before unpin\n");
 		else {
@@ -2437,7 +2437,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->gfx.rlc.save_restore_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
@@ -2448,7 +2448,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
 	}
 
 	if (adev->gfx.rlc.clear_state_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -2459,7 +2459,7 @@ static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
 	}
 
 	if (adev->gfx.rlc.cp_table_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
@@ -2792,7 +2792,7 @@ static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
 		if (ring->mqd_obj) {
-			r = amdgpu_bo_reserve(ring->mqd_obj, false);
+			r = amdgpu_bo_reserve(ring->mqd_obj, true);
 			if (unlikely(r != 0))
 				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
@@ -2810,7 +2810,7 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->gfx.mec.hpd_eop_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -3359,7 +3359,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
 	/* save restore block */
 	if (adev->gfx.rlc.save_restore_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
@@ -3371,7 +3371,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
 	/* clear state block */
 	if (adev->gfx.rlc.clear_state_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -3383,7 +3383,7 @@ static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
 	/* clear state block */
 	if (adev->gfx.rlc.cp_table_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
@@ -1239,7 +1239,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
 	/* clear state block */
 	if (adev->gfx.rlc.clear_state_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
@@ -1250,7 +1250,7 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
 	/* jump table block */
 	if (adev->gfx.rlc.cp_table_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
@@ -1363,7 +1363,7 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->gfx.mec.hpd_eop_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -1490,7 +1490,7 @@ static int gfx_v8_0_kiq_init(struct amdgpu_device *adev)
 	memset(hpd, 0, MEC_HPD_SIZE);
 
-	r = amdgpu_bo_reserve(kiq->eop_obj, false);
+	r = amdgpu_bo_reserve(kiq->eop_obj, true);
 	if (unlikely(r != 0))
 		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
 	amdgpu_bo_kunmap(kiq->eop_obj);
@@ -453,7 +453,7 @@ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
 	int r;
 
 	if (adev->gfx.mec.hpd_eop_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
@@ -463,7 +463,7 @@ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
 		adev->gfx.mec.hpd_eop_obj = NULL;
 	}
 	if (adev->gfx.mec.mec_fw_obj) {
-		r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
+		r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
 		if (unlikely(r != 0))
 			dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
 		amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
@@ -599,7 +599,7 @@ static int gfx_v9_0_kiq_init(struct amdgpu_device *adev)
 	memset(hpd, 0, MEC_HPD_SIZE);
 
-	r = amdgpu_bo_reserve(kiq->eop_obj, false);
+	r = amdgpu_bo_reserve(kiq->eop_obj, true);
 	if (unlikely(r != 0))
 		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
 	amdgpu_bo_kunmap(kiq->eop_obj);
@@ -1786,7 +1786,7 @@ static void gfx_v9_0_cp_compute_fini(struct amdgpu_device *adev)
 		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
 
 		if (ring->mqd_obj) {
-			r = amdgpu_bo_reserve(ring->mqd_obj, false);
+			r = amdgpu_bo_reserve(ring->mqd_obj, true);
 			if (unlikely(r != 0))
 				dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);