Commit 13a752e3 authored by Monk Liu, committed by Alex Deucher

drm/amdgpu:cleanup in_sriov_reset and lock_reset

Since GPU reset is now unified in gpu_recover for both bare-metal
and SR-IOV:

1) rename in_sriov_reset to in_gpu_reset
2) move lock_reset from adev->virt to adev
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5740682e
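The hunks below are mechanical, but the shape of the change is worth restating: the reset flag and the reset mutex stop being SR-IOV-only members under adev->virt and become plain device state, initialized with the device. What follows is a minimal userspace sketch of that end state, with pthreads standing in for the kernel mutex; the struct and helper names mirror, but are not, the driver's real types.

/* Illustrative sketch only: pthreads stand in for struct mutex. */
#include <pthread.h>
#include <stdbool.h>

struct device {
	/* Formerly adev->virt.lock_reset, SR-IOV only. */
	pthread_mutex_t lock_reset;
	/* Formerly adev->in_sriov_reset. */
	bool in_gpu_reset;
};

static void device_init(struct device *dev)
{
	/* Set up with the rest of the device, not in the
	 * virtualization-only init path. */
	pthread_mutex_init(&dev->lock_reset, NULL);
	dev->in_gpu_reset = false;
}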
@@ -1643,7 +1643,8 @@ struct amdgpu_device {
 	/* record last mm index being written through WREG32*/
 	unsigned long last_mm_index;
-	bool in_sriov_reset;
+	bool in_gpu_reset;
+	struct mutex lock_reset;
 };

 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
...
@@ -2163,6 +2163,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->mn_lock);
 	mutex_init(&adev->virt.vf_errors.lock);
 	hash_init(adev->mn_hash);
+	mutex_init(&adev->lock_reset);

 	amdgpu_check_arguments(adev);
@@ -2990,9 +2991,9 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
 	dev_info(adev->dev, "GPU reset begin!\n");

-	mutex_lock(&adev->virt.lock_reset);
+	mutex_lock(&adev->lock_reset);
 	atomic_inc(&adev->gpu_reset_counter);
-	adev->in_sriov_reset = 1;
+	adev->in_gpu_reset = 1;

 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -3102,8 +3103,8 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
 	}

 	amdgpu_vf_error_trans_all(adev);
-	adev->in_sriov_reset = 0;
-	mutex_unlock(&adev->virt.lock_reset);
+	adev->in_gpu_reset = 0;
+	mutex_unlock(&adev->lock_reset);
 	return r;
 }
...
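Taken together, the two amdgpu_gpu_recover() hunks above reduce to a simple bracket: take the device-wide lock, raise the flag, recover, clear the flag, unlock. A hedged sketch of just that bracket, continuing the illustrative userspace types from above; do_reset_work() is a placeholder, not a driver function.

/* Placeholder for the real hardware reset + ring/job recovery. */
static int do_reset_work(struct device *dev);

static int gpu_recover(struct device *dev)
{
	int r;

	/* One lock for bare-metal and SR-IOV alike; concurrent
	 * recover calls serialize here. */
	pthread_mutex_lock(&dev->lock_reset);
	dev->in_gpu_reset = true;

	r = do_reset_work(dev);

	dev->in_gpu_reset = false;
	pthread_mutex_unlock(&dev->lock_reset);
	return r;
}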
@@ -264,7 +264,7 @@ static int psp_hw_start(struct psp_context *psp)
 	struct amdgpu_device *adev = psp->adev;
 	int ret;

-	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+	if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
 		ret = psp_bootloader_load_sysdrv(psp);
 		if (ret)
 			return ret;
...
@@ -370,7 +370,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 		return 0;
 	}

-	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+	if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
 		err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
 					amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
 					AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
...
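The PSP and ucode hunks above share one idiom: the condition is true on any bare-metal path and on an SR-IOV function's first init, and false only when an SR-IOV function is inside a reset, where the loaded sysdrv and the firmware buffer object survive and must not be redone. Restated in the same illustrative sketch; is_sriov_vf() and one_time_setup() are stand-ins, not driver API.

static bool is_sriov_vf(const struct device *dev);	/* stand-in */
static int one_time_setup(struct device *dev);		/* stand-in */

static int hw_start(struct device *dev)
{
	/* Bare metal redoes setup on every reset; an SR-IOV VF in
	 * reset keeps its bootloader and firmware BO, so skip. */
	if (!is_sriov_vf(dev) || !dev->in_gpu_reset) {
		int ret = one_time_setup(dev);
		if (ret)
			return ret;
	}
	/* per-reset setup continues here */
	return 0;
}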
@@ -115,8 +115,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 	adev->enable_virtual_display = true;
 	adev->cg_flags = 0;
 	adev->pg_flags = 0;
-
-	mutex_init(&adev->virt.lock_reset);
 }

 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
...
@@ -239,7 +239,6 @@ struct amdgpu_virt {
 	uint64_t			csa_vmid0_addr;
 	bool chained_ib_support;
 	uint32_t			reg_val_offs;
-	struct mutex			lock_reset;
 	struct amdgpu_irq_src		ack_irq;
 	struct amdgpu_irq_src		rcv_irq;
 	struct work_struct		flr_work;
...
@@ -4824,7 +4824,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 	gfx_v8_0_kiq_setting(ring);

-	if (adev->in_sriov_reset) { /* for GPU_RESET case */
+	if (adev->in_gpu_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4861,7 +4861,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];

-	if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+	if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
 		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
 		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4873,7 +4873,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
-	} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
+	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
...
@@ -2757,7 +2757,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 	gfx_v9_0_kiq_setting(ring);

-	if (adev->in_sriov_reset) { /* for GPU_RESET case */
+	if (adev->in_gpu_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -2795,7 +2795,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct v9_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];

-	if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+	if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -2807,7 +2807,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
-	} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
+	} else if (adev->in_gpu_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
...
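The gfx_v8_0 and gfx_v9_0 hunks are mechanical renames, but the branch they touch encodes the MQD (memory queue descriptor) lifecycle: build and snapshot on first init, restore the snapshot on GPU reset, leave the live MQD alone on resume from suspend. A self-contained sketch of that three-way split; the sizes and helpers are illustrative, not the vi_mqd/v9_mqd layouts.

#include <stdbool.h>
#include <string.h>

/* Stand-in for the asic-specific MQD field setup. */
static void init_mqd_fields(void *mqd);

static void kcq_init_queue(void *mqd, void *backup, size_t mqd_size,
			   bool in_gpu_reset, bool in_suspend)
{
	if (!in_gpu_reset && !in_suspend) {
		/* First init: build from scratch, then snapshot the
		 * clean MQD so a later reset can restore it. */
		memset(mqd, 0, mqd_size);
		init_mqd_fields(mqd);
		if (backup)
			memcpy(backup, mqd, mqd_size);
	} else if (in_gpu_reset) {
		/* GPU reset: discard whatever state the hang left
		 * behind and restore the clean snapshot. */
		if (backup)
			memcpy(mqd, backup, mqd_size);
	}
	/* Resume from suspend: the MQD contents are still valid. */
}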