Commit c0e51931 authored by Christian König, committed by Alex Deucher

drm/amdgpu: cleanup coding style in amdgpu_vm_flush

Abort early if there is nothing to do and correctly indent the "if"s.
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 641e9400
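The cleanup described in the commit message is the classic guard-clause refactor: invert the one large "if" that wrapped the whole function body, return early when there is nothing to do, and drop an indentation level from everything that follows. A minimal self-contained sketch of the same transformation, using hypothetical work_needed()/do_work() helpers that are illustrative only and not part of amdgpu:

#include <stdbool.h>

struct ctx { bool dirty; };

static bool work_needed(const struct ctx *c) { return c->dirty; }
static void do_work(struct ctx *c) { c->dirty = false; }

/* Before: the entire function body nests inside one big "if". */
static int flush_before(struct ctx *c)
{
	if (work_needed(c)) {
		do_work(c);
	}
	return 0;
}

/* After: abort early when there is nothing to do, so the real work
 * sits one indentation level higher, the same shape as the diff below. */
static int flush_after(struct ctx *c)
{
	if (!work_needed(c))
		return 0;

	do_work(c);
	return 0;
}

int main(void)
{
	struct ctx c = { .dirty = true };

	flush_before(&c);
	c.dirty = true;
	flush_after(&c);
	return 0;
}

Compiled standalone, both variants behave identically; the payoff is purely the flatter control flow that the real patch achieves in amdgpu_vm_flush().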
@@ -597,60 +597,62 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		id->gws_size != job->gws_size ||
 		id->oa_base != job->oa_base ||
 		id->oa_size != job->oa_size);
+	unsigned patch_offset = 0;
 	int r;
 
-	if (job->vm_needs_flush || gds_switch_needed ||
-	    amdgpu_vm_had_gpu_reset(adev, id) ||
-	    amdgpu_vm_ring_has_compute_vm_bug(ring)) {
-		unsigned patch_offset = 0;
+	if (!job->vm_needs_flush && !gds_switch_needed &&
+	    !amdgpu_vm_had_gpu_reset(adev, id) &&
+	    !amdgpu_vm_ring_has_compute_vm_bug(ring))
+		return 0;
 
-		if (ring->funcs->init_cond_exec)
-			patch_offset = amdgpu_ring_init_cond_exec(ring);
+	if (ring->funcs->init_cond_exec)
+		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
-		if (ring->funcs->emit_pipeline_sync &&
-		    (job->vm_needs_flush || gds_switch_needed ||
-		     amdgpu_vm_ring_has_compute_vm_bug(ring)))
-			amdgpu_ring_emit_pipeline_sync(ring);
+	if (ring->funcs->emit_pipeline_sync &&
+	    (job->vm_needs_flush || gds_switch_needed ||
+	     amdgpu_vm_ring_has_compute_vm_bug(ring)))
+		amdgpu_ring_emit_pipeline_sync(ring);
 
-		if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
-		    amdgpu_vm_had_gpu_reset(adev, id))) {
-			struct dma_fence *fence;
-			u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+	if (ring->funcs->emit_vm_flush &&
+	    (job->vm_needs_flush || amdgpu_vm_had_gpu_reset(adev, id))) {
+		u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+		struct dma_fence *fence;
 
-			trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
-			amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
+		trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+		amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
 
-			r = amdgpu_fence_emit(ring, &fence);
-			if (r)
-				return r;
+		r = amdgpu_fence_emit(ring, &fence);
+		if (r)
+			return r;
 
-			mutex_lock(&adev->vm_manager.lock);
-			dma_fence_put(id->last_flush);
-			id->last_flush = fence;
-			mutex_unlock(&adev->vm_manager.lock);
-		}
+		mutex_lock(&adev->vm_manager.lock);
+		dma_fence_put(id->last_flush);
+		id->last_flush = fence;
+		mutex_unlock(&adev->vm_manager.lock);
+	}
 
-		if (gds_switch_needed) {
-			id->gds_base = job->gds_base;
-			id->gds_size = job->gds_size;
-			id->gws_base = job->gws_base;
-			id->gws_size = job->gws_size;
-			id->oa_base = job->oa_base;
-			id->oa_size = job->oa_size;
-			amdgpu_ring_emit_gds_switch(ring, job->vm_id,
-						    job->gds_base, job->gds_size,
-						    job->gws_base, job->gws_size,
-						    job->oa_base, job->oa_size);
-		}
+	if (gds_switch_needed) {
+		id->gds_base = job->gds_base;
+		id->gds_size = job->gds_size;
+		id->gws_base = job->gws_base;
+		id->gws_size = job->gws_size;
+		id->oa_base = job->oa_base;
+		id->oa_size = job->oa_size;
+		amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+					    job->gds_size, job->gws_base,
+					    job->gws_size, job->oa_base,
+					    job->oa_size);
+	}
 
-		if (ring->funcs->patch_cond_exec)
-			amdgpu_ring_patch_cond_exec(ring, patch_offset);
+	if (ring->funcs->patch_cond_exec)
+		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
-		/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
-		if (ring->funcs->emit_switch_buffer) {
-			amdgpu_ring_emit_switch_buffer(ring);
-			amdgpu_ring_emit_switch_buffer(ring);
-		}
-	}
+	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
+	if (ring->funcs->emit_switch_buffer) {
+		amdgpu_ring_emit_switch_buffer(ring);
+		amdgpu_ring_emit_switch_buffer(ring);
+	}
+
 	return 0;
 }
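A side note on the init_cond_exec/patch_cond_exec pair the diff keeps intact: init_cond_exec reserves space in the ring and returns patch_offset, and patch_cond_exec later fills that spot in once the size of the conditionally executed section is known. A rough self-contained sketch of that reserve-then-patch idea follows; the ring layout, opcode, and helper bodies are invented for illustration and do not match the real amdgpu packet format:

#include <stdint.h>
#include <stdio.h>

/* Toy ring buffer; everything here is an assumption, not amdgpu's. */
struct ring {
	uint32_t buf[64];
	unsigned wptr;
};

static void ring_write(struct ring *r, uint32_t v)
{
	r->buf[r->wptr++] = v;
}

/* Reserve a conditional-execution header whose skip count is not yet
 * known; return the offset of the placeholder for later patching. */
static unsigned init_cond_exec(struct ring *r)
{
	ring_write(r, 0xC0DE0000);	/* pretend COND_EXEC opcode */
	ring_write(r, 0);		/* placeholder skip count */
	return r->wptr - 1;
}

/* Patch the placeholder with the number of dwords emitted since, so
 * the GPU could skip them when the condition turns out to be false. */
static void patch_cond_exec(struct ring *r, unsigned offset)
{
	r->buf[offset] = r->wptr - offset - 1;
}

int main(void)
{
	struct ring r = { .wptr = 0 };
	unsigned patch_offset = init_cond_exec(&r);

	ring_write(&r, 0xF1u);	/* stand-ins for the flush packets */
	ring_write(&r, 0xF2u);

	patch_cond_exec(&r, patch_offset);
	printf("skip count patched to %u dwords\n",
	       (unsigned)r.buf[patch_offset]);
	return 0;
}

This also shows why the patch can hoist patch_offset to the top of the function: it is only consumed if the conditional-execution path was actually set up, and initializing it to 0 keeps it harmless otherwise.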