Commit 48ad368a authored by Andrey Grodzovsky, committed by Alex Deucher

drm/amdgpu: move amdgpu_ctx_mgr_entity_fini to f_ops flush hook (V4)

With this we can now terminate jobs enqueued into the SW queue the
moment the task is killed, instead of waiting for the last user of
the drm file to release it.
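
For context on the hook choice: the VFS calls f_op->flush() on every
close of a file descriptor, including the implicit closes performed
when a dying task's file table is torn down, while f_op->release()
runs only once, when the last reference to the struct file is
dropped. A minimal hypothetical demo module (not part of this patch,
all names made up) that makes the difference observable:

/* flushdemo.c - hypothetical demo, not from this patch: logs when
 * .flush and .release fire for a misc character device. */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static int flushdemo_flush(struct file *f, fl_owner_t id)
{
        /* Runs on every close(2), including the closes done when a
         * task is killed and its file table is destroyed. */
        pr_info("flushdemo: flush (every close, incl. process kill)\n");
        return 0;
}

static int flushdemo_release(struct inode *inode, struct file *f)
{
        /* Runs only once, when the last reference to the file goes away. */
        pr_info("flushdemo: release (last reference only)\n");
        return 0;
}

static const struct file_operations flushdemo_fops = {
        .owner   = THIS_MODULE,
        .flush   = flushdemo_flush,
        .release = flushdemo_release,
};

static struct miscdevice flushdemo_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "flushdemo",
        .fops  = &flushdemo_fops,
};

module_misc_device(flushdemo_dev);
MODULE_LICENSE("GPL");

Open /dev/flushdemo, fork, then kill the child: flush logs on each
close, release only after the parent's descriptor is also gone. That
is why moving amdgpu_ctx_mgr_entity_fini() to flush stops job
submission at kill time rather than at last release.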

Also stop checking for kref_read(&ctx->refcount) == 1 when calling
drm_sched_entity_do_release: another task might still hold a
reference to this entity, but we don't care, since KILL means
terminating job submission regardless of what other tasks are doing.

v2:
Use the returned remaining timeout as the parameter for the next call.
Rebase.
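
The v2 scheme caps the total wait across all entities instead of
allowing up to the full timeout per entity: every call consumes part
of one shared budget and returns the remainder for the next call. A
standalone userspace sketch of the same pattern (all names here are
illustrative, not the driver's API):

/* toy_timeout_chain.c - sketch of the v2 pattern: one wait budget is
 * threaded through successive waits so the total wait is bounded. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long now_ms(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Wait for one (simulated) queue to drain, but never longer than
 * budget_ms; return the budget left for the next wait. */
static long drain_queue(int q, long budget_ms)
{
        long start = now_ms();
        long work_ms = 30 * (q + 1);    /* simulated drain time */

        usleep((work_ms < budget_ms ? work_ms : budget_ms) * 1000);
        long left = budget_ms - (now_ms() - start);
        return left > 0 ? left : 0;
}

int main(void)
{
        long budget_ms = 200;   /* overall cap on the whole loop */

        for (int q = 0; q < 8 && budget_ms > 0; q++) {
                budget_ms = drain_queue(q, budget_ms);
                printf("after queue %d: %ld ms of budget left\n", q, budget_ms);
        }
        return 0;
}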

v3:
Switch to working with jiffies.
Streamline usage of the remaining timeout (TO).
Rebase.
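
Working with jiffies follows the usual kernel timeout convention:
convert a wall-clock duration once with msecs_to_jiffies() and let
the wait primitives, which take and return jiffies, hand back the
unused remainder. A kernel-style fragment sketching the idiom (the
helper and its condition callback are made up, not from this patch):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

/* Hypothetical helper: express a 10 s budget in jiffies, wait on a
 * condition, and return what is left of the budget (0 on timeout). */
static long drain_with_budget(wait_queue_head_t *wq, bool (*queue_empty)(void))
{
        long budget = msecs_to_jiffies(10 * 1000);

        budget = wait_event_timeout(*wq, queue_empty(), budget);
        return budget;
}

drm_sched_entity_do_release() is used the same way in the diff below:
it takes the remaining budget in jiffies and returns what it did not
consume.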

v4:
Rebase.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 741f01e6
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -449,26 +449,28 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
 	uint32_t id, i;
+	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 
 	idp = &mgr->ctx_handles;
 
+	mutex_lock(&mgr->lock);
 	idr_for_each_entry(idp, ctx, id) {
 
-		if (!ctx->adev)
+		if (!ctx->adev) {
+			mutex_unlock(&mgr->lock);
 			return;
+		}
 
 		for (i = 0; i < ctx->adev->num_rings; i++) {
 
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
-						&ctx->rings[i].entity);
-			else
-				DRM_ERROR("ctx %p is still alive\n", ctx);
+			max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+						&ctx->rings[i].entity, max_wait);
 		}
 	}
+	mutex_unlock(&mgr->lock);
 }
 
 void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -855,9 +855,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
 	.runtime_idle = amdgpu_pmops_runtime_idle,
 };
 
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+	struct drm_file *file_priv = f->private_data;
+	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
+
+	return 0;
+}
+
+
 static const struct file_operations amdgpu_driver_kms_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
+	.flush = amdgpu_flush,
 	.release = drm_release,
 	.unlocked_ioctl = amdgpu_drm_ioctl,
 	.mmap = amdgpu_mmap,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -930,7 +930,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		return;
 
 	pm_runtime_get_sync(dev->dev);
 
-	amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 	if (adev->asic_type != CHIP_RAVEN) {
 		amdgpu_uvd_free_handles(adev, file_priv);