Commit ddd2b472 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2024-01-26' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Plenty of ivpu fixes to improve general stability and debugging, a
suspend fix for the anx7625 bridge, a revert to fix an initialization
order bug between i915 and simpledrm, and a documentation warning fix
for dp_mst.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/tp77e5fokigup6cgmpq6mtg46kzdw2dpze6smpnwfoml4kmwpq@bo6mbkezpkle
parents 66dbd900 27d19268
@@ -102,7 +102,7 @@ static int reset_pending_show(struct seq_file *s, void *v)
 {
 	struct ivpu_device *vdev = seq_to_ivpu(s);
 
-	seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
+	seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_pending));
 	return 0;
 }
@@ -130,7 +130,9 @@ dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size
 	fw->dvfs_mode = dvfs_mode;
 
-	ivpu_pm_schedule_recovery(vdev);
+	ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+	if (ret)
+		return ret;
 
 	return size;
 }
@@ -190,7 +192,10 @@ fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
 		return ret;
 
 	ivpu_hw_profiling_freq_drive(vdev, enable);
-	ivpu_pm_schedule_recovery(vdev);
+
+	ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+	if (ret)
+		return ret;
 
 	return size;
 }
@@ -301,11 +306,18 @@ static ssize_t
 ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
 {
 	struct ivpu_device *vdev = file->private_data;
+	int ret;
 
 	if (!size)
 		return -EINVAL;
 
-	ivpu_pm_schedule_recovery(vdev);
+	ret = ivpu_rpm_get(vdev);
+	if (ret)
+		return ret;
+
+	ivpu_pm_trigger_recovery(vdev, "debugfs");
+	flush_work(&vdev->pm->recovery_work);
+	ivpu_rpm_put(vdev);
+
 	return size;
 }
...
@@ -6,6 +6,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_file.h>
@@ -17,6 +18,7 @@
 #include "ivpu_debugfs.h"
 #include "ivpu_drv.h"
 #include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
 #include "ivpu_gem.h"
 #include "ivpu_hw.h"
 #include "ivpu_ipc.h"
@@ -65,22 +67,20 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
 	return file_priv;
 }
 
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
 {
-	struct ivpu_file_priv *file_priv;
-
-	xa_lock_irq(&vdev->context_xa);
-	file_priv = xa_load(&vdev->context_xa, id);
-	/* file_priv may still be in context_xa during file_priv_release() */
-	if (file_priv && !kref_get_unless_zero(&file_priv->ref))
-		file_priv = NULL;
-	xa_unlock_irq(&vdev->context_xa);
-
-	if (file_priv)
-		ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
-			 file_priv->ctx.id, kref_read(&file_priv->ref));
-
-	return file_priv;
+	mutex_lock(&file_priv->lock);
+	if (file_priv->bound) {
+		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
+
+		ivpu_cmdq_release_all_locked(file_priv);
+		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
+		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+		file_priv->bound = false;
+		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
+	}
+	mutex_unlock(&file_priv->lock);
 }
 
 static void file_priv_release(struct kref *ref)
@@ -88,13 +88,15 @@ static void file_priv_release(struct kref *ref)
 	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
 	struct ivpu_device *vdev = file_priv->vdev;
 
-	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
+		 file_priv->ctx.id, (bool)file_priv->bound);
+
+	pm_runtime_get_sync(vdev->drm.dev);
+	mutex_lock(&vdev->context_list_lock);
+	file_priv_unbind(vdev, file_priv);
+	mutex_unlock(&vdev->context_list_lock);
+	pm_runtime_put_autosuspend(vdev->drm.dev);
 
-	ivpu_cmdq_release_all(file_priv);
-	ivpu_jsm_context_release(vdev, file_priv->ctx.id);
-	ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
-	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-	drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
 	mutex_destroy(&file_priv->lock);
 	kfree(file_priv);
 }
@@ -176,9 +178,6 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
 		args->value = vdev->hw->ranges.user.start;
 		break;
-	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
-		args->value = file_priv->priority;
-		break;
 	case DRM_IVPU_PARAM_CONTEXT_ID:
 		args->value = file_priv->ctx.id;
 		break;
@@ -218,17 +217,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-	struct ivpu_file_priv *file_priv = file->driver_priv;
 	struct drm_ivpu_param *args = data;
 	int ret = 0;
 
 	switch (args->param) {
-	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
-		if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
-			file_priv->priority = args->value;
-		else
-			ret = -EINVAL;
-		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -241,50 +233,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 	struct ivpu_device *vdev = to_ivpu_device(dev);
 	struct ivpu_file_priv *file_priv;
 	u32 ctx_id;
-	void *old;
-	int ret;
+	int idx, ret;
 
-	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
-	if (ret) {
-		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
-		return ret;
-	}
+	if (!drm_dev_enter(dev, &idx))
+		return -ENODEV;
 
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
 	if (!file_priv) {
 		ret = -ENOMEM;
-		goto err_xa_erase;
+		goto err_dev_exit;
 	}
 
 	file_priv->vdev = vdev;
-	file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
+	file_priv->bound = true;
 	kref_init(&file_priv->ref);
 	mutex_init(&file_priv->lock);
 
+	mutex_lock(&vdev->context_list_lock);
+
+	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
+			   vdev->context_xa_limit, GFP_KERNEL);
+	if (ret) {
+		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+		goto err_unlock;
+	}
+
 	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
 	if (ret)
-		goto err_mutex_destroy;
+		goto err_xa_erase;
 
-	old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
-	if (xa_is_err(old)) {
-		ret = xa_err(old);
-		ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
-		goto err_ctx_fini;
-	}
+	mutex_unlock(&vdev->context_list_lock);
+	drm_dev_exit(idx);
+
+	file->driver_priv = file_priv;
 
 	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
 		 ctx_id, current->comm, task_pid_nr(current));
 
-	file->driver_priv = file_priv;
 	return 0;
 
-err_ctx_fini:
-	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-err_mutex_destroy:
-	mutex_destroy(&file_priv->lock);
-	kfree(file_priv);
err_xa_erase:
 	xa_erase_irq(&vdev->context_xa, ctx_id);
+err_unlock:
+	mutex_unlock(&vdev->context_list_lock);
+	mutex_destroy(&file_priv->lock);
+	kfree(file_priv);
+err_dev_exit:
+	drm_dev_exit(idx);
 	return ret;
 }
@@ -340,8 +335,6 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
 	if (!ret)
 		ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
-	else
-		ivpu_hw_diagnose_failure(vdev);
 
 	return ret;
 }
@@ -369,6 +362,9 @@ int ivpu_boot(struct ivpu_device *vdev)
 	ret = ivpu_wait_for_ready(vdev);
 	if (ret) {
 		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+		ivpu_hw_diagnose_failure(vdev);
+		ivpu_mmu_evtq_dump(vdev);
+		ivpu_fw_log_dump(vdev);
 		return ret;
 	}
@@ -540,6 +536,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
 	INIT_LIST_HEAD(&vdev->bo_list);
 
+	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+	if (ret)
+		goto err_xa_destroy;
+
 	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
 	if (ret)
 		goto err_xa_destroy;
@@ -611,14 +611,30 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	return ret;
 }
 
+static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
+{
+	struct ivpu_file_priv *file_priv;
+	unsigned long ctx_id;
+
+	mutex_lock(&vdev->context_list_lock);
+
+	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
+		file_priv_unbind(vdev, file_priv);
+
+	mutex_unlock(&vdev->context_list_lock);
+}
+
 static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
 	ivpu_pm_disable(vdev);
 	ivpu_shutdown(vdev);
+
 	if (IVPU_WA(d3hot_after_power_off))
 		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
 
+	ivpu_jobs_abort_all(vdev);
 	ivpu_job_done_consumer_fini(vdev);
 	ivpu_pm_cancel_recovery(vdev);
+	ivpu_bo_unbind_all_user_contexts(vdev);
 
 	ivpu_ipc_fini(vdev);
 	ivpu_fw_fini(vdev);
...
@@ -56,6 +56,7 @@
 #define IVPU_DBG_JSM	 BIT(10)
 #define IVPU_DBG_KREF	 BIT(11)
 #define IVPU_DBG_RPM	 BIT(12)
+#define IVPU_DBG_MMU_MAP BIT(13)
 
 #define ivpu_err(vdev, fmt, ...) \
 	drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -114,6 +115,7 @@ struct ivpu_device {
 	struct ivpu_mmu_context gctx;
 	struct ivpu_mmu_context rctx;
+	struct mutex context_list_lock; /* Protects user context addition/removal */
 	struct xarray context_xa;
 	struct xa_limit context_xa_limit;
@@ -145,8 +147,8 @@ struct ivpu_file_priv {
 	struct mutex lock; /* Protects cmdq */
 	struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
 	struct ivpu_mmu_context ctx;
-	u32 priority;
 	bool has_mmu_faults;
+	bool bound;
 };
@@ -162,7 +164,6 @@ extern bool ivpu_disable_mmu_cont_pages;
 extern int ivpu_test_mode;
 
 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
 void ivpu_file_priv_put(struct ivpu_file_priv **link);
 
 int ivpu_boot(struct ivpu_device *vdev);
...
@@ -24,14 +24,11 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
 
 static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
 {
-	if (bo->ctx)
-		ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n",
-			 action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
-			 bo->handle, bo->ctx->id, bo->vpu_addr, bo->mmu_mapped);
-	else
-		ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n",
-			 action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
-			 bo->handle);
+	ivpu_dbg(vdev, BO,
+		 "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
+		 action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+		 (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
+		 (bool)bo->base.base.import_attach);
 }
 
 /*
@@ -49,12 +46,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
 	mutex_lock(&bo->lock);
 
 	ivpu_dbg_bo(vdev, bo, "pin");
-
-	if (!bo->ctx) {
-		ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle);
-		ret = -EINVAL;
-		goto unlock;
-	}
+	drm_WARN_ON(&vdev->drm, !bo->ctx);
 
 	if (!bo->mmu_mapped) {
 		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
@@ -85,7 +77,10 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 		       const struct ivpu_addr_range *range)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-	int ret;
+	int idx, ret;
+
+	if (!drm_dev_enter(&vdev->drm, &idx))
+		return -ENODEV;
 
 	mutex_lock(&bo->lock);
@@ -101,6 +96,8 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 
 	mutex_unlock(&bo->lock);
 
+	drm_dev_exit(idx);
+
 	return ret;
 }
@@ -108,11 +105,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
 	struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-	lockdep_assert_held(&bo->lock);
-
-	ivpu_dbg_bo(vdev, bo, "unbind");
-
-	/* TODO: dma_unmap */
+	lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
 
 	if (bo->mmu_mapped) {
 		drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -124,19 +117,23 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 
 	if (bo->ctx) {
 		ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
+		bo->vpu_addr = 0;
 		bo->ctx = NULL;
 	}
-}
 
-static void ivpu_bo_unbind(struct ivpu_bo *bo)
-{
-	mutex_lock(&bo->lock);
-	ivpu_bo_unbind_locked(bo);
-	mutex_unlock(&bo->lock);
+	if (bo->base.base.import_attach)
+		return;
+
+	dma_resv_lock(bo->base.base.resv, NULL);
+	if (bo->base.sgt) {
+		dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+		sg_free_table(bo->base.sgt);
+		kfree(bo->base.sgt);
+		bo->base.sgt = NULL;
+	}
+	dma_resv_unlock(bo->base.base.resv);
 }
 
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
 {
 	struct ivpu_bo *bo;
@@ -146,8 +143,10 @@ void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
 	mutex_lock(&vdev->bo_list_lock);
 	list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
 		mutex_lock(&bo->lock);
-		if (bo->ctx == ctx)
+		if (bo->ctx == ctx) {
+			ivpu_dbg_bo(vdev, bo, "unbind");
 			ivpu_bo_unbind_locked(bo);
+		}
 		mutex_unlock(&bo->lock);
 	}
 	mutex_unlock(&vdev->bo_list_lock);
@@ -199,9 +198,6 @@ ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
 	list_add_tail(&bo->bo_list_node, &vdev->bo_list);
 	mutex_unlock(&vdev->bo_list_lock);
 
-	ivpu_dbg(vdev, BO, "create: vpu_addr 0x%llx size %zu flags 0x%x\n",
-		 bo->vpu_addr, bo->base.base.size, flags);
-
 	return bo;
 }
@@ -212,6 +208,12 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
 	struct ivpu_bo *bo = to_ivpu_bo(obj);
 	struct ivpu_addr_range *range;
 
+	if (bo->ctx) {
+		ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
+			  file_priv->ctx.id, bo->ctx->id);
+		return -EALREADY;
+	}
+
 	if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
 		range = &vdev->hw->ranges.shave;
 	else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
@@ -227,62 +229,24 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
 	struct ivpu_device *vdev = to_ivpu_device(obj->dev);
 	struct ivpu_bo *bo = to_ivpu_bo(obj);
 
+	ivpu_dbg_bo(vdev, bo, "free");
+
 	mutex_lock(&vdev->bo_list_lock);
 	list_del(&bo->bo_list_node);
 	mutex_unlock(&vdev->bo_list_lock);
 
 	drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
 
-	ivpu_dbg_bo(vdev, bo, "free");
-
-	ivpu_bo_unbind(bo);
+	ivpu_bo_unbind_locked(bo);
 	mutex_destroy(&bo->lock);
 
 	drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
 	drm_gem_shmem_free(&bo->base);
 }
 
-static const struct dma_buf_ops ivpu_bo_dmabuf_ops = {
-	.cache_sgt_mapping = true,
-	.attach = drm_gem_map_attach,
-	.detach = drm_gem_map_detach,
-	.map_dma_buf = drm_gem_map_dma_buf,
-	.unmap_dma_buf = drm_gem_unmap_dma_buf,
-	.release = drm_gem_dmabuf_release,
-	.mmap = drm_gem_dmabuf_mmap,
-	.vmap = drm_gem_dmabuf_vmap,
-	.vunmap = drm_gem_dmabuf_vunmap,
-};
-
-static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
-{
-	struct drm_device *dev = obj->dev;
-	struct dma_buf_export_info exp_info = {
-		.exp_name = KBUILD_MODNAME,
-		.owner = dev->driver->fops->owner,
-		.ops = &ivpu_bo_dmabuf_ops,
-		.size = obj->size,
-		.flags = flags,
-		.priv = obj,
-		.resv = obj->resv,
-	};
-	void *sgt;
-
-	/*
-	 * Make sure that pages are allocated and dma-mapped before exporting the bo.
-	 * DMA-mapping is required if the bo will be imported to the same device.
-	 */
-	sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
-	if (IS_ERR(sgt))
-		return sgt;
-
-	return drm_gem_dmabuf_export(dev, &exp_info);
-}
-
 static const struct drm_gem_object_funcs ivpu_gem_funcs = {
 	.free = ivpu_bo_free,
 	.open = ivpu_bo_open,
-	.export = ivpu_bo_export,
 	.print_info = drm_gem_shmem_object_print_info,
 	.pin = drm_gem_shmem_object_pin,
 	.unpin = drm_gem_shmem_object_unpin,
@@ -315,11 +279,9 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 		return PTR_ERR(bo);
 	}
 
-	ret = drm_gem_handle_create(file, &bo->base.base, &bo->handle);
-	if (!ret) {
-		args->vpu_addr = bo->vpu_addr;
-		args->handle = bo->handle;
-	}
+	ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+	if (!ret)
+		args->vpu_addr = bo->vpu_addr;
 
 	drm_gem_object_put(&bo->base.base);
@@ -361,7 +323,9 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
 	if (ret)
 		goto err_put;
 
+	dma_resv_lock(bo->base.base.resv, NULL);
 	ret = drm_gem_shmem_vmap(&bo->base, &map);
+	dma_resv_unlock(bo->base.base.resv);
 	if (ret)
 		goto err_put;
@@ -376,7 +340,10 @@ void ivpu_bo_free_internal(struct ivpu_bo *bo)
 {
 	struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
+	dma_resv_lock(bo->base.base.resv, NULL);
 	drm_gem_shmem_vunmap(&bo->base, &map);
+	dma_resv_unlock(bo->base.base.resv);
 
 	drm_gem_object_put(&bo->base.base);
 }
@@ -432,19 +399,11 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-	unsigned long dma_refcount = 0;
-
 	mutex_lock(&bo->lock);
 
-	if (bo->base.base.dma_buf && bo->base.base.dma_buf->file)
-		dma_refcount = atomic_long_read(&bo->base.base.dma_buf->file->f_count);
-
-	drm_printf(p, "%-3u %-6d 0x%-12llx %-10lu 0x%-8x %-4u %-8lu",
-		   bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.base.size,
-		   bo->flags, kref_read(&bo->base.base.refcount), dma_refcount);
-
-	if (bo->base.base.import_attach)
-		drm_printf(p, " imported");
+	drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+		   bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+		   bo->flags, kref_read(&bo->base.base.refcount));
 
 	if (bo->base.pages)
 		drm_printf(p, " has_pages");
@@ -452,6 +411,9 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 	if (bo->mmu_mapped)
 		drm_printf(p, " mmu_mapped");
 
+	if (bo->base.base.import_attach)
+		drm_printf(p, " imported");
+
 	drm_printf(p, "\n");
 
 	mutex_unlock(&bo->lock);
@@ -462,8 +424,8 @@ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
 	struct ivpu_device *vdev = to_ivpu_device(dev);
 	struct ivpu_bo *bo;
 
-	drm_printf(p, "%-3s %-6s %-14s %-10s %-10s %-4s %-8s %s\n",
-		   "ctx", "handle", "vpu_addr", "size", "flags", "refs", "dma_refs", "attribs");
+	drm_printf(p, "%-9s %-3s %-14s %-10s %-10s %-4s %s\n",
+		   "bo", "ctx", "vpu_addr", "size", "flags", "refs", "attribs");
 
 	mutex_lock(&vdev->bo_list_lock);
 	list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
...
@@ -19,14 +19,13 @@ struct ivpu_bo {
 	struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
 
 	u64 vpu_addr;
-	u32 handle;
 	u32 flags;
 	u32 job_status; /* Valid only for command buffer */
 	bool mmu_mapped;
 };
 
 int ivpu_bo_pin(struct ivpu_bo *bo);
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
 
 struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
 struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
...
@@ -875,24 +875,18 @@ static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
 
 static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
 {
-	ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
-
-	ivpu_pm_schedule_recovery(vdev);
+	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
 }
 
 static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
 {
-	ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
-
 	ivpu_hw_wdt_disable(vdev);
-	ivpu_pm_schedule_recovery(vdev);
+	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
 }
 
 static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 {
-	ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
-
-	ivpu_pm_schedule_recovery(vdev);
+	ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
 }
 
 /* Handler for IRQs from VPU core (irqV) */
@@ -970,7 +964,7 @@ static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
 	REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
 
 	if (schedule_recovery)
-		ivpu_pm_schedule_recovery(vdev);
+		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
 
 	return true;
 }
...
@@ -746,7 +746,7 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
 	return 0;
 }
 
-static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+static int ivpu_hw_40xx_ip_reset(struct ivpu_device *vdev)
 {
 	int ret;
 	u32 val;
@@ -768,6 +768,23 @@ static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
 	return ret;
 }
 
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+	int ret = 0;
+
+	if (ivpu_hw_40xx_ip_reset(vdev)) {
+		ivpu_err(vdev, "Failed to reset VPU IP\n");
+		ret = -EIO;
+	}
+
+	if (ivpu_pll_disable(vdev)) {
+		ivpu_err(vdev, "Failed to disable PLL\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
 static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
 {
 	int ret;
@@ -913,7 +930,7 @@ static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
 
 	ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
 
-	if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+	if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
 		ivpu_warn(vdev, "Failed to reset the VPU\n");
 
 	if (ivpu_pll_disable(vdev)) {
@@ -1032,18 +1049,18 @@ static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
 
 static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
 {
 	/* TODO: For LNN hang consider engine reset instead of full recovery */
-	ivpu_pm_schedule_recovery(vdev);
+	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
 }
 
 static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
 {
 	ivpu_hw_wdt_disable(vdev);
-	ivpu_pm_schedule_recovery(vdev);
+	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
 }
 
 static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 {
-	ivpu_pm_schedule_recovery(vdev);
+	ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
 }
 
 /* Handler for IRQs from VPU core (irqV) */
@@ -1137,7 +1154,7 @@ static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
 	REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
 
 	if (schedule_recovery)
-		ivpu_pm_schedule_recovery(vdev);
+		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
 
 	return true;
 }
...
@@ -343,10 +343,8 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
 	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
 						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
 						vdev->timeout.jsm);
-	if (hb_ret == -ETIMEDOUT) {
-		ivpu_hw_diagnose_failure(vdev);
-		ivpu_pm_schedule_recovery(vdev);
-	}
+	if (hb_ret == -ETIMEDOUT)
+		ivpu_pm_trigger_recovery(vdev, "IPC timeout");
 
 	return ret;
 }
...
@@ -112,22 +112,20 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin
 	}
 }
 
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
 {
 	int i;
 
-	mutex_lock(&file_priv->lock);
+	lockdep_assert_held(&file_priv->lock);
 
 	for (i = 0; i < IVPU_NUM_ENGINES; i++)
 		ivpu_cmdq_release_locked(file_priv, i);
-
-	mutex_unlock(&file_priv->lock);
 }
 
 /*
  * Mark the doorbell as unregistered and reset job queue pointers.
  * This function needs to be called when the VPU hardware is restarted
- * and FW looses job queue state. The next time job queue is used it
+ * and FW loses job queue state. The next time job queue is used it
  * will be registered again.
  */
 static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
@@ -161,15 +159,13 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
 	struct ivpu_file_priv *file_priv;
 	unsigned long ctx_id;
 
-	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
-		file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
-		if (!file_priv)
-			continue;
+	mutex_lock(&vdev->context_list_lock);
 
+	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
 		ivpu_cmdq_reset_all(file_priv);
 
-		ivpu_file_priv_put(&file_priv);
-	}
+	mutex_unlock(&vdev->context_list_lock);
 }
 
 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -243,60 +239,32 @@ static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
 	return &fence->base;
 }
 
-static void job_get(struct ivpu_job *job, struct ivpu_job **link)
+static void ivpu_job_destroy(struct ivpu_job *job)
 {
 	struct ivpu_device *vdev = job->vdev;
-
-	kref_get(&job->ref);
-	*link = job;
-
-	ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-}
-
-static void job_release(struct kref *ref)
-{
-	struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
-	struct ivpu_device *vdev = job->vdev;
 	u32 i;
 
+	ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
+		 job->job_id, job->file_priv->ctx.id, job->engine_idx);
+
 	for (i = 0; i < job->bo_count; i++)
 		if (job->bos[i])
 			drm_gem_object_put(&job->bos[i]->base.base);
 
 	dma_fence_put(job->done_fence);
 	ivpu_file_priv_put(&job->file_priv);
-
-	ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
 	kfree(job);
-
-	/* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
-	ivpu_rpm_put(vdev);
-}
-
-static void job_put(struct ivpu_job *job)
-{
-	struct ivpu_device *vdev = job->vdev;
-
-	ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-	kref_put(&job->ref, job_release);
 }
 
 static struct ivpu_job *
-ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
 {
 	struct ivpu_device *vdev = file_priv->vdev;
 	struct ivpu_job *job;
-	int ret;
-
-	ret = ivpu_rpm_get(vdev);
-	if (ret < 0)
-		return NULL;
 
 	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
 	if (!job)
-		goto err_rpm_put;
+		return NULL;
 
-	kref_init(&job->ref);
 	job->vdev = vdev;
 	job->engine_idx = engine_idx;
@@ -310,17 +278,14 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
 	job->file_priv = ivpu_file_priv_get(file_priv);
 
 	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
-
 	return job;
 
 err_free_job:
 	kfree(job);
-err_rpm_put:
-	ivpu_rpm_put(vdev);
 	return NULL;
 }
 
-static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 {
 	struct ivpu_job *job;
@@ -337,9 +302,10 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 	ivpu_dbg(vdev, JOB, "Job complete:  id %3u ctx %2d engine %d status 0x%x\n",
 		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
 
+	ivpu_job_destroy(job);
 	ivpu_stop_job_timeout_detection(vdev);
 
-	job_put(job);
+	ivpu_rpm_put(vdev);
 	return 0;
 }
@@ -349,10 +315,10 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
 	unsigned long id;
 
 	xa_for_each(&vdev->submitted_jobs_xa, id, job)
-		ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+		ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED);
 }
 
-static int ivpu_direct_job_submission(struct ivpu_job *job)
+static int ivpu_job_submit(struct ivpu_job *job)
 {
 	struct ivpu_file_priv *file_priv = job->file_priv;
 	struct ivpu_device *vdev = job->vdev;
@@ -360,53 +326,65 @@ static int ivpu_direct_job_submission(struct ivpu_job *job)
 	struct ivpu_cmdq *cmdq;
 	int ret;
 
+	ret = ivpu_rpm_get(vdev);
+	if (ret < 0)
+		return ret;
+
 	mutex_lock(&file_priv->lock);
 
 	cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
 	if (!cmdq) {
-		ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
-			  file_priv->ctx.id, job->engine_idx);
+		ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
				      file_priv->ctx.id, job->engine_idx);
 		ret = -EINVAL;
-		goto err_unlock;
+		goto err_unlock_file_priv;
 	}
 
 	job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
 	job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
 
-	job_get(job, &job);
-	ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+	xa_lock(&vdev->submitted_jobs_xa);
+	ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
 	if (ret) {
-		ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
-		goto err_job_put;
+		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+			 file_priv->ctx.id);
+		ret = -EBUSY;
+		goto err_unlock_submitted_jobs_xa;
 	}
 
 	ret = ivpu_cmdq_push_job(cmdq, job);
 	if (ret)
-		goto err_xa_erase;
+		goto err_erase_xa;
 
 	ivpu_start_job_timeout_detection(vdev);
 
-	ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
-		 job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
-		 job->engine_idx, cmdq->jobq->header.tail);
-
-	if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW) {
-		ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
 		cmdq->jobq->header.head = cmdq->jobq->header.tail;
 		wmb(); /* Flush WC buffer for jobq header */
 	} else {
 		ivpu_cmdq_ring_db(vdev, cmdq);
 	}
 
+	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
+		 job->job_id, file_priv->ctx.id, job->engine_idx,
+		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+
+	xa_unlock(&vdev->submitted_jobs_xa);
+
 	mutex_unlock(&file_priv->lock);
 
+	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+
 	return 0;
 
-err_xa_erase:
-	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_job_put:
-	job_put(job);
-err_unlock:
+err_erase_xa:
+	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs_xa:
+	xa_unlock(&vdev->submitted_jobs_xa);
err_unlock_file_priv:
 	mutex_unlock(&file_priv->lock);
+	ivpu_rpm_put(vdev);
 	return ret;
 }
@@ -488,6 +466,9 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (params->engine > DRM_IVPU_ENGINE_COPY)
 		return -EINVAL;
 
+	if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+		return -EINVAL;
+
 	if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
 		return -EINVAL;
@@ -509,44 +490,49 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			     params->buffer_count * sizeof(u32));
 	if (ret) {
 		ret = -EFAULT;
-		goto free_handles;
+		goto err_free_handles;
 	}
 
 	if (!drm_dev_enter(&vdev->drm, &idx)) {
 		ret = -ENODEV;
-		goto free_handles;
+		goto err_free_handles;
 	}
 
 	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
 		 file_priv->ctx.id, params->buffer_count);
 
-	job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+	job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
 	if (!job) {
 		ivpu_err(vdev, "Failed to create job\n");
 		ret = -ENOMEM;
-		goto dev_exit;
+		goto err_exit_dev;
 	}
 
 	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
 					      params->commands_offset);
 	if (ret) {
-		ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
-		goto job_put;
+		ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+		goto err_destroy_job;
 	}
 
-	ret = ivpu_direct_job_submission(job);
-	if (ret) {
-		dma_fence_signal(job->done_fence);
-		ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
-	}
+	down_read(&vdev->pm->reset_lock);
+	ret = ivpu_job_submit(job);
+	up_read(&vdev->pm->reset_lock);
+	if (ret)
+		goto err_signal_fence;
 
-job_put:
-	job_put(job);
-dev_exit:
 	drm_dev_exit(idx);
-free_handles:
 	kfree(buf_handles);
+	return ret;
+
+err_signal_fence:
+	dma_fence_signal(job->done_fence);
+err_destroy_job:
+	ivpu_job_destroy(job);
+err_exit_dev:
+	drm_dev_exit(idx);
+err_free_handles:
+	kfree(buf_handles);
 	return ret;
 }
@@ -568,7 +554,7 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
 	}
 
 	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
-	ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
 	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
 		ivpu_start_job_timeout_detection(vdev);
 }
...
@@ -43,7 +43,6 @@ struct ivpu_cmdq {
    will update the job status
  */
 struct ivpu_job {
-	struct kref ref;
 	struct ivpu_device *vdev;
 	struct ivpu_file_priv *file_priv;
 	struct dma_fence *done_fence;
@@ -56,7 +55,7 @@ struct ivpu_job {
 
 int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
 
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
 void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
 
 void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
...
@@ -7,6 +7,7 @@
 #include <linux/highmem.h>
 
 #include "ivpu_drv.h"
+#include "ivpu_hw.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
@@ -518,6 +519,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
 		ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
 			 ivpu_mmu_cmdq_err_to_str(err));
+		ivpu_hw_diagnose_failure(vdev);
 	}
 
 	return ret;
@@ -885,7 +887,6 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
 
 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 {
-	bool schedule_recovery = false;
 	u32 *event;
 	u32 ssid;
 
@@ -895,14 +896,21 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 		ivpu_mmu_dump_event(vdev, event);
 
 		ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
-		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
-			schedule_recovery = true;
-		else
-			ivpu_mmu_user_context_mark_invalid(vdev, ssid);
-	}
+		if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+			ivpu_pm_trigger_recovery(vdev, "MMU event");
+			return;
+		}
 
-	if (schedule_recovery)
-		ivpu_pm_schedule_recovery(vdev);
+		ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+	}
+}
+
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+{
+	u32 *event;
+
+	while ((event = ivpu_mmu_get_event(vdev)) != NULL)
+		ivpu_mmu_dump_event(vdev, event);
 }
 
 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
...
@@ -46,5 +46,6 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
 
 #endif /* __IVPU_MMU_H__ */
@@ -355,6 +355,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
 		size_t size = sg_dma_len(sg) + sg->offset;
 
+		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+			 ctx->id, dma_addr, vpu_addr, size);
+
 		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
 		if (ret) {
 			ivpu_err(vdev, "Failed to map context pages\n");
@@ -366,6 +369,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 
 	/* Ensure page table modifications are flushed from wc buffers to memory */
 	wmb();
+
 	mutex_unlock(&ctx->lock);
 
 	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -388,14 +392,19 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
 	mutex_lock(&ctx->lock);
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
+		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
 		size_t size = sg_dma_len(sg) + sg->offset;
 
+		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+			 ctx->id, dma_addr, vpu_addr, size);
+
 		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
 		vpu_addr += size;
 	}
 
 	/* Ensure page table modifications are flushed from wc buffers to memory */
 	wmb();
+
 	mutex_unlock(&ctx->lock);
 
 	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
...
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include "ivpu_drv.h" #include "ivpu_drv.h"
#include "ivpu_hw.h" #include "ivpu_hw.h"
#include "ivpu_fw.h" #include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_ipc.h" #include "ivpu_ipc.h"
#include "ivpu_job.h" #include "ivpu_job.h"
#include "ivpu_jsm_msg.h" #include "ivpu_jsm_msg.h"
...@@ -111,6 +112,14 @@ static void ivpu_pm_recovery_work(struct work_struct *work) ...@@ -111,6 +112,14 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL}; char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
int ret; int ret;
ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
ret = pm_runtime_resume_and_get(vdev->drm.dev);
if (ret)
ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
ivpu_fw_log_dump(vdev);
retry: retry:
ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev)); ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) { if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
...@@ -122,11 +131,13 @@ static void ivpu_pm_recovery_work(struct work_struct *work) ...@@ -122,11 +131,13 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
ivpu_err(vdev, "Failed to reset VPU: %d\n", ret); ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt); kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
} }
void ivpu_pm_schedule_recovery(struct ivpu_device *vdev) void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
{ {
struct ivpu_pm_info *pm = vdev->pm; ivpu_err(vdev, "Recovery triggered by %s\n", reason);
if (ivpu_disable_recovery) { if (ivpu_disable_recovery) {
ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n"); ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
...@@ -138,10 +149,11 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev) ...@@ -138,10 +149,11 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
return; return;
} }
/* Schedule recovery if it's not in progress */ /* Trigger recovery if it's not in progress */
if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) { if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
ivpu_hw_irq_disable(vdev); ivpu_hw_diagnose_failure(vdev);
queue_work(system_long_wq, &pm->recovery_work); ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
queue_work(system_long_wq, &vdev->pm->recovery_work);
} }
} }
...@@ -149,12 +161,8 @@ static void ivpu_job_timeout_work(struct work_struct *work) ...@@ -149,12 +161,8 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{ {
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work); struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev; struct ivpu_device *vdev = pm->vdev;
unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
ivpu_err(vdev, "TDR detected, timeout %lu ms", timeout_ms); ivpu_pm_trigger_recovery(vdev, "TDR");
ivpu_hw_diagnose_failure(vdev);
ivpu_pm_schedule_recovery(vdev);
} }
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev) void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
...@@ -227,6 +235,9 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev) ...@@ -227,6 +235,9 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
bool hw_is_idle = true; bool hw_is_idle = true;
int ret; int ret;
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
ivpu_dbg(vdev, PM, "Runtime suspend..\n"); ivpu_dbg(vdev, PM, "Runtime suspend..\n");
if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) { if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
...@@ -247,7 +258,8 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev) ...@@ -247,7 +258,8 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret); ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
if (!hw_is_idle) { if (!hw_is_idle) {
ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n"); ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
ivpu_fw_log_dump(vdev);
ivpu_pm_prepare_cold_boot(vdev); ivpu_pm_prepare_cold_boot(vdev);
} else { } else {
ivpu_pm_prepare_warm_boot(vdev); ivpu_pm_prepare_warm_boot(vdev);
...@@ -308,11 +320,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev) ...@@ -308,11 +320,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
{ {
struct ivpu_device *vdev = pci_get_drvdata(pdev); struct ivpu_device *vdev = pci_get_drvdata(pdev);
pm_runtime_get_sync(vdev->drm.dev);
ivpu_dbg(vdev, PM, "Pre-reset..\n"); ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter); atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->in_reset, 1); atomic_set(&vdev->pm->reset_pending, 1);
pm_runtime_get_sync(vdev->drm.dev);
down_write(&vdev->pm->reset_lock);
ivpu_prepare_for_reset(vdev); ivpu_prepare_for_reset(vdev);
ivpu_hw_reset(vdev); ivpu_hw_reset(vdev);
ivpu_pm_prepare_cold_boot(vdev); ivpu_pm_prepare_cold_boot(vdev);
...@@ -329,9 +342,11 @@ void ivpu_pm_reset_done_cb(struct pci_dev *pdev) ...@@ -329,9 +342,11 @@ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
ret = ivpu_resume(vdev); ret = ivpu_resume(vdev);
if (ret) if (ret)
ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret); ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
atomic_set(&vdev->pm->in_reset, 0); up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
ivpu_dbg(vdev, PM, "Post-reset done.\n"); ivpu_dbg(vdev, PM, "Post-reset done.\n");
pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev); pm_runtime_put_autosuspend(vdev->drm.dev);
} }
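Converting the old in_reset atomic into reset_lock (an rw_semaphore) lets the PCI reset callbacks hold the device exclusively across the whole prepare/done window, while normal paths can take the lock shared. A hedged userspace analogue with POSIX rwlocks (names hypothetical; in the driver the write side spans reset_prepare_cb to reset_done_cb):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Normal operation: many submitters may hold the lock shared. */
    static void submit_job(int id)
    {
    	pthread_rwlock_rdlock(&reset_lock);
    	printf("job %d runs against live hardware\n", id);
    	pthread_rwlock_unlock(&reset_lock);
    }

    /* Reset path: waits out all readers, then owns the device exclusively. */
    static void do_reset(void)
    {
    	pthread_rwlock_wrlock(&reset_lock);	/* like down_write() in reset_prepare_cb */
    	printf("hardware reset in progress\n");
    	pthread_rwlock_unlock(&reset_lock);	/* like up_write() in reset_done_cb */
    }

    int main(void)
    {
    	submit_job(1);
    	do_reset();
    	submit_job(2);
    	return 0;
    }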
...@@ -344,7 +359,10 @@ void ivpu_pm_init(struct ivpu_device *vdev) ...@@ -344,7 +359,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
pm->vdev = vdev; pm->vdev = vdev;
pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT; pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
atomic_set(&pm->in_reset, 0); init_rwsem(&pm->reset_lock);
atomic_set(&pm->reset_pending, 0);
atomic_set(&pm->reset_counter, 0);
INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work); INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work); INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#ifndef __IVPU_PM_H__ #ifndef __IVPU_PM_H__
#define __IVPU_PM_H__ #define __IVPU_PM_H__
#include <linux/rwsem.h>
#include <linux/types.h> #include <linux/types.h>
struct ivpu_device; struct ivpu_device;
...@@ -14,8 +15,9 @@ struct ivpu_pm_info { ...@@ -14,8 +15,9 @@ struct ivpu_pm_info {
struct ivpu_device *vdev; struct ivpu_device *vdev;
struct delayed_work job_timeout_work; struct delayed_work job_timeout_work;
struct work_struct recovery_work; struct work_struct recovery_work;
atomic_t in_reset; struct rw_semaphore reset_lock;
atomic_t reset_counter; atomic_t reset_counter;
atomic_t reset_pending;
bool is_warmboot; bool is_warmboot;
u32 suspend_reschedule_counter; u32 suspend_reschedule_counter;
}; };
...@@ -37,7 +39,7 @@ int __must_check ivpu_rpm_get(struct ivpu_device *vdev); ...@@ -37,7 +39,7 @@ int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev); int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
void ivpu_rpm_put(struct ivpu_device *vdev); void ivpu_rpm_put(struct ivpu_device *vdev);
void ivpu_pm_schedule_recovery(struct ivpu_device *vdev); void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason);
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev); void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev); void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
......
...@@ -128,4 +128,4 @@ static __init int sysfb_init(void) ...@@ -128,4 +128,4 @@ static __init int sysfb_init(void)
} }
/* must execute after PCI subsystem for EFI quirks */ /* must execute after PCI subsystem for EFI quirks */
subsys_initcall_sync(sysfb_init); device_initcall(sysfb_init);
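This revert moves sysfb_init() from subsys_initcall_sync (level 4s) back to device_initcall (level 6), restoring its original ordering relative to built-in DRM drivers and fixing the i915/simpledrm initialization-order regression mentioned in the tag. For reference, an illustrative built-in initcall at that level looks like this (a kernel fragment, not a buildable module on its own):

    #include <linux/init.h>

    static int __init example_init(void)
    {
    	return 0;
    }
    /* Level 6: runs after core, postcore, arch, subsys and fs initcalls,
     * i.e. alongside ordinary built-in device drivers. */
    device_initcall(example_init);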
...@@ -1762,6 +1762,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux, ...@@ -1762,6 +1762,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
u8 request = msg->request & ~DP_AUX_I2C_MOT; u8 request = msg->request & ~DP_AUX_I2C_MOT;
int ret = 0; int ret = 0;
mutex_lock(&ctx->aux_lock);
pm_runtime_get_sync(dev); pm_runtime_get_sync(dev);
msg->reply = 0; msg->reply = 0;
switch (request) { switch (request) {
...@@ -1778,6 +1779,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux, ...@@ -1778,6 +1779,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
msg->size, msg->buffer); msg->size, msg->buffer);
pm_runtime_mark_last_busy(dev); pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev); pm_runtime_put_autosuspend(dev);
mutex_unlock(&ctx->aux_lock);
return ret; return ret;
} }
...@@ -2474,7 +2476,9 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge, ...@@ -2474,7 +2476,9 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
ctx->connector = NULL; ctx->connector = NULL;
anx7625_dp_stop(ctx); anx7625_dp_stop(ctx);
pm_runtime_put_sync(dev); mutex_lock(&ctx->aux_lock);
pm_runtime_put_sync_suspend(dev);
mutex_unlock(&ctx->aux_lock);
} }
static enum drm_connector_status static enum drm_connector_status
...@@ -2668,6 +2672,7 @@ static int anx7625_i2c_probe(struct i2c_client *client) ...@@ -2668,6 +2672,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
mutex_init(&platform->lock); mutex_init(&platform->lock);
mutex_init(&platform->hdcp_wq_lock); mutex_init(&platform->hdcp_wq_lock);
mutex_init(&platform->aux_lock);
INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func); INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
platform->hdcp_workqueue = create_workqueue("hdcp workqueue"); platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
......
...@@ -475,6 +475,8 @@ struct anx7625_data { ...@@ -475,6 +475,8 @@ struct anx7625_data {
struct workqueue_struct *hdcp_workqueue; struct workqueue_struct *hdcp_workqueue;
/* Lock for hdcp work queue */ /* Lock for hdcp work queue */
struct mutex hdcp_wq_lock; struct mutex hdcp_wq_lock;
/* Lock for aux transfer and disable */
struct mutex aux_lock;
char edid_block; char edid_block;
struct display_timing dt; struct display_timing dt;
u8 display_timing_valid; u8 display_timing_valid;
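The anx7625 hunks work together: the new aux_lock serializes anx7625_aux_transfer() against anx7625_bridge_atomic_disable(), so the forced suspend (pm_runtime_put_sync_suspend()) can never run while a transfer still holds a runtime-PM reference keeping the bridge awake. The shape of the pattern as a hedged userspace sketch (pthreads; pm_get()/pm_put() stand in for runtime PM):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t aux_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pm_refcount;	/* stand-in for the runtime-PM usage count */

    static void pm_get(void) { pm_refcount++; }
    static void pm_put(void) { pm_refcount--; }

    /* AUX transfer: the device must stay powered for the whole transaction. */
    static void aux_transfer(void)
    {
    	pthread_mutex_lock(&aux_lock);
    	pm_get();
    	printf("AUX transfer, refcount %d\n", pm_refcount);
    	pm_put();
    	pthread_mutex_unlock(&aux_lock);
    }

    /* Disable path: taking aux_lock first guarantees no transfer is mid-flight
     * when the forced suspend drops the last reference. */
    static void bridge_disable(void)
    {
    	pthread_mutex_lock(&aux_lock);
    	printf("force suspend, refcount %d (must be 0)\n", pm_refcount);
    	pthread_mutex_unlock(&aux_lock);
    }

    int main(void)
    {
    	aux_transfer();
    	bridge_disable();
    	return 0;
    }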
......
...@@ -107,6 +107,7 @@ struct ps8640 { ...@@ -107,6 +107,7 @@ struct ps8640 {
struct device_link *link; struct device_link *link;
bool pre_enabled; bool pre_enabled;
bool need_post_hpd_delay; bool need_post_hpd_delay;
struct mutex aux_lock;
}; };
static const struct regmap_config ps8640_regmap_config[] = { static const struct regmap_config ps8640_regmap_config[] = {
...@@ -345,11 +346,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, ...@@ -345,11 +346,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
int ret; int ret;
mutex_lock(&ps_bridge->aux_lock);
pm_runtime_get_sync(dev); pm_runtime_get_sync(dev);
ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
if (ret) {
pm_runtime_put_sync_suspend(dev);
goto exit;
}
ret = ps8640_aux_transfer_msg(aux, msg); ret = ps8640_aux_transfer_msg(aux, msg);
pm_runtime_mark_last_busy(dev); pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev); pm_runtime_put_autosuspend(dev);
exit:
mutex_unlock(&ps_bridge->aux_lock);
return ret; return ret;
} }
...@@ -470,7 +480,18 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge, ...@@ -470,7 +480,18 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
ps_bridge->pre_enabled = false; ps_bridge->pre_enabled = false;
ps8640_bridge_vdo_control(ps_bridge, DISABLE); ps8640_bridge_vdo_control(ps_bridge, DISABLE);
/*
* The bridge seems to expect everything to be power cycled at the
* disable process, so grab a lock here to make sure
* ps8640_aux_transfer() is not holding a runtime PM reference and
* preventing the bridge from suspend.
*/
mutex_lock(&ps_bridge->aux_lock);
pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev); pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
mutex_unlock(&ps_bridge->aux_lock);
} }
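ps8640 gets the same aux_lock treatment, with one addition: the transfer first waits for HPD via _ps8640_wait_hpd_asserted(), and on failure drops its runtime-PM reference and funnels through the exit label so the mutex is released on every path. A generic sketch of that single-unlock goto shape (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t aux_lock = PTHREAD_MUTEX_INITIALIZER;

    static int wait_hpd_asserted(void)
    {
    	return 0;	/* pretend HPD came up; nonzero would mean timeout */
    }

    static int aux_transfer(void)
    {
    	int ret;

    	pthread_mutex_lock(&aux_lock);

    	ret = wait_hpd_asserted();
    	if (ret)
    		goto exit;	/* the driver also drops its PM reference here */

    	printf("transfer runs only with HPD asserted\n");

    exit:
    	/* Single unlock point: every path, success or failure, passes through. */
    	pthread_mutex_unlock(&aux_lock);
    	return ret;
    }

    int main(void)
    {
    	return aux_transfer() ? 1 : 0;
    }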
static int ps8640_bridge_attach(struct drm_bridge *bridge, static int ps8640_bridge_attach(struct drm_bridge *bridge,
...@@ -619,6 +640,8 @@ static int ps8640_probe(struct i2c_client *client) ...@@ -619,6 +640,8 @@ static int ps8640_probe(struct i2c_client *client)
if (!ps_bridge) if (!ps_bridge)
return -ENOMEM; return -ENOMEM;
mutex_init(&ps_bridge->aux_lock);
ps_bridge->supplies[0].supply = "vdd12"; ps_bridge->supplies[0].supply = "vdd12";
ps_bridge->supplies[1].supply = "vdd33"; ps_bridge->supplies[1].supply = "vdd33";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies), ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
......
...@@ -1080,6 +1080,26 @@ static int sii902x_init(struct sii902x *sii902x) ...@@ -1080,6 +1080,26 @@ static int sii902x_init(struct sii902x *sii902x)
return ret; return ret;
} }
ret = sii902x_audio_codec_init(sii902x, dev);
if (ret)
return ret;
i2c_set_clientdata(sii902x->i2c, sii902x);
sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
1, 0, I2C_MUX_GATE,
sii902x_i2c_bypass_select,
sii902x_i2c_bypass_deselect);
if (!sii902x->i2cmux) {
ret = -ENOMEM;
goto err_unreg_audio;
}
sii902x->i2cmux->priv = sii902x;
ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
if (ret)
goto err_unreg_audio;
sii902x->bridge.funcs = &sii902x_bridge_funcs; sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node; sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings; sii902x->bridge.timings = &default_sii902x_timings;
...@@ -1090,19 +1110,13 @@ static int sii902x_init(struct sii902x *sii902x) ...@@ -1090,19 +1110,13 @@ static int sii902x_init(struct sii902x *sii902x)
drm_bridge_add(&sii902x->bridge); drm_bridge_add(&sii902x->bridge);
sii902x_audio_codec_init(sii902x, dev); return 0;
i2c_set_clientdata(sii902x->i2c, sii902x);
sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev, err_unreg_audio:
1, 0, I2C_MUX_GATE, if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
sii902x_i2c_bypass_select, platform_device_unregister(sii902x->audio.pdev);
sii902x_i2c_bypass_deselect);
if (!sii902x->i2cmux)
return -ENOMEM;
sii902x->i2cmux->priv = sii902x; return ret;
return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
} }
static int sii902x_probe(struct i2c_client *client) static int sii902x_probe(struct i2c_client *client)
...@@ -1170,12 +1184,14 @@ static int sii902x_probe(struct i2c_client *client) ...@@ -1170,12 +1184,14 @@ static int sii902x_probe(struct i2c_client *client)
} }
static void sii902x_remove(struct i2c_client *client) static void sii902x_remove(struct i2c_client *client)
{ {
struct sii902x *sii902x = i2c_get_clientdata(client); struct sii902x *sii902x = i2c_get_clientdata(client);
i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge); drm_bridge_remove(&sii902x->bridge);
i2c_mux_del_adapters(sii902x->i2cmux);
if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
platform_device_unregister(sii902x->audio.pdev);
} }
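The sii902x fix is about ordering: the audio codec and I2C mux are now set up before drm_bridge_add() publishes the bridge, failures unwind through err_unreg_audio, and sii902x_remove() tears everything down in reverse order of creation. A compact sketch of that probe/unwind discipline (resources hypothetical):

    #include <stdio.h>

    static int init_audio(void)	{ printf("audio up\n"); return 0; }
    static void fini_audio(void)	{ printf("audio down\n"); }
    static int init_mux(void)	{ printf("mux up\n"); return 0; }
    static void fini_mux(void)	{ printf("mux down\n"); }
    static void add_bridge(void)	{ printf("bridge registered\n"); }
    static void del_bridge(void)	{ printf("bridge removed\n"); }

    static int probe(void)
    {
    	int ret;

    	ret = init_audio();
    	if (ret)
    		return ret;

    	ret = init_mux();
    	if (ret)
    		goto err_unreg_audio;	/* later steps undo earlier ones */

    	add_bridge();	/* publish last, once everything it needs exists */
    	return 0;

    err_unreg_audio:
    	fini_audio();
    	return ret;
    }

    static void teardown(void)
    {
    	/* Reverse order of probe(): bridge, then mux, then audio. */
    	del_bridge();
    	fini_mux();
    	fini_audio();
    }

    int main(void)
    {
    	if (!probe())
    		teardown();
    	return 0;
    }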
static const struct of_device_id sii902x_dt_ids[] = { static const struct of_device_id sii902x_dt_ids[] = {
......
...@@ -5491,6 +5491,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); ...@@ -5491,6 +5491,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
* - 0 if the new state is valid * - 0 if the new state is valid
* - %-ENOSPC, if the new state is invalid, because of BW limitation * - %-ENOSPC, if the new state is invalid, because of BW limitation
* @failing_port is set to: * @failing_port is set to:
*
* - The non-root port where a BW limit check failed * - The non-root port where a BW limit check failed
* with all the ports downstream of @failing_port passing * with all the ports downstream of @failing_port passing
* the BW limit check. * the BW limit check.
...@@ -5499,6 +5500,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); ...@@ -5499,6 +5500,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
* - %NULL if the BW limit check failed at the root port * - %NULL if the BW limit check failed at the root port
* with all the ports downstream of the root port passing * with all the ports downstream of the root port passing
* the BW limit check. * the BW limit check.
*
* - %-EINVAL, if the new state is invalid, because the root port has * - %-EINVAL, if the new state is invalid, because the root port has
* too many payloads. * too many payloads.
*/ */
......
...@@ -539,6 +539,8 @@ config DRM_PANEL_RAYDIUM_RM692E5 ...@@ -539,6 +539,8 @@ config DRM_PANEL_RAYDIUM_RM692E5
depends on OF depends on OF
depends on DRM_MIPI_DSI depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE depends on BACKLIGHT_CLASS_DEVICE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
help help
Say Y here if you want to enable support for Raydium RM692E5-based Say Y here if you want to enable support for Raydium RM692E5-based
display panels, such as the one found in the Fairphone 5 smartphone. display panels, such as the one found in the Fairphone 5 smartphone.
......
...@@ -309,7 +309,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = { ...@@ -309,7 +309,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
.off_func = s6d7aa0_lsl080al02_off, .off_func = s6d7aa0_lsl080al02_off,
.drm_mode = &s6d7aa0_lsl080al02_mode, .drm_mode = &s6d7aa0_lsl080al02_mode,
.mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP, .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
.bus_flags = DRM_BUS_FLAG_DE_HIGH, .bus_flags = 0,
.has_backlight = false, .has_backlight = false,
.use_passwd3 = false, .use_passwd3 = false,
......
...@@ -3948,6 +3948,7 @@ static const struct panel_desc tianma_tm070jdhg30 = { ...@@ -3948,6 +3948,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
}, },
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS, .connector_type = DRM_MODE_CONNECTOR_LVDS,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
}; };
static const struct panel_desc tianma_tm070jvhg33 = { static const struct panel_desc tianma_tm070jvhg33 = {
...@@ -3960,6 +3961,7 @@ static const struct panel_desc tianma_tm070jvhg33 = { ...@@ -3960,6 +3961,7 @@ static const struct panel_desc tianma_tm070jvhg33 = {
}, },
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS, .connector_type = DRM_MODE_CONNECTOR_LVDS,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
}; };
static const struct display_timing tianma_tm070rvhg71_timing = { static const struct display_timing tianma_tm070rvhg71_timing = {
......
...@@ -188,13 +188,13 @@ static void drm_test_mm_init(struct kunit *test) ...@@ -188,13 +188,13 @@ static void drm_test_mm_init(struct kunit *test)
static void drm_test_mm_debug(struct kunit *test) static void drm_test_mm_debug(struct kunit *test)
{ {
struct drm_printer p = drm_debug_printer(test->name);
struct drm_mm mm; struct drm_mm mm;
struct drm_mm_node nodes[2]; struct drm_mm_node nodes[2];
/* Create a small drm_mm with a couple of nodes and a few holes, and /* Create a small drm_mm with a couple of nodes and a few holes, and
* check that the debug iterator doesn't explode over a trivial drm_mm. * check that the debug iterator doesn't explode over a trivial drm_mm.
*/ */
drm_mm_init(&mm, 0, 4096); drm_mm_init(&mm, 0, 4096);
memset(nodes, 0, sizeof(nodes)); memset(nodes, 0, sizeof(nodes));
...@@ -209,6 +209,9 @@ static void drm_test_mm_debug(struct kunit *test) ...@@ -209,6 +209,9 @@ static void drm_test_mm_debug(struct kunit *test)
KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]), KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
"failed to reserve node[0] {start=%lld, size=%lld)\n", "failed to reserve node[0] {start=%lld, size=%lld)\n",
nodes[0].start, nodes[0].size); nodes[0].start, nodes[0].size);
drm_mm_print(&mm, &p);
KUNIT_SUCCEED(test);
} }
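Previously drm_test_mm_debug() built the drm_mm but never ran the debug iterator it claims to test; the fix adds a drm_printer, calls drm_mm_print(), and records an explicit success. Condensed to its shape (a sketch, not the verbatim test):

    #include <kunit/test.h>
    #include <drm/drm_mm.h>
    #include <drm/drm_print.h>

    static void example_mm_debug(struct kunit *test)
    {
    	struct drm_printer p = drm_debug_printer(test->name);
    	struct drm_mm mm;

    	drm_mm_init(&mm, 0, 4096);
    	/* ... reserve a couple of nodes to create holes, as above ... */

    	/* The actual point of the test: the print iterator must not explode. */
    	drm_mm_print(&mm, &p);
    	KUNIT_SUCCEED(test);
    }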
static bool expect_insert(struct kunit *test, struct drm_mm *mm, static bool expect_insert(struct kunit *test, struct drm_mm *mm,
......
...@@ -95,12 +95,18 @@ static int ttm_global_init(void) ...@@ -95,12 +95,18 @@ static int ttm_global_init(void)
ttm_pool_mgr_init(num_pages); ttm_pool_mgr_init(num_pages);
ttm_tt_mgr_init(num_pages, num_dma32); ttm_tt_mgr_init(num_pages, num_dma32);
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
__GFP_NOWARN);
/* Retry without GFP_DMA32 for platforms DMA32 is not available */
if (unlikely(glob->dummy_read_page == NULL)) {
glob->dummy_read_page = alloc_page(__GFP_ZERO);
if (unlikely(glob->dummy_read_page == NULL)) { if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
}
INIT_LIST_HEAD(&glob->device_list); INIT_LIST_HEAD(&glob->device_list);
atomic_set(&glob->bo_count, 0); atomic_set(&glob->bo_count, 0);
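The TTM change turns a hard allocation failure into a two-step attempt: try the DMA32-constrained page first with __GFP_NOWARN to suppress the allocation-failure splat, and if the platform lacks a DMA32 zone, retry unconstrained and warn once. The same try-preferred-then-fall-back shape in plain C (aligned_alloc() standing in for the constrained allocator, purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the constrained allocation; like GFP_DMA32 it
     * may legitimately fail on platforms without the special zone. */
    static void *alloc_constrained(size_t size)
    {
    	return aligned_alloc(4096, size);
    }

    static void *alloc_dummy_page(size_t size)
    {
    	void *page = alloc_constrained(size);

    	if (!page) {
    		/* Retry without the constraint rather than failing outright. */
    		page = malloc(size);
    		if (!page)
    			return NULL;
    		fprintf(stderr, "using fallback for dummy page\n");
    	}
    	return page;
    }

    int main(void)
    {
    	void *page = alloc_dummy_page(4096);

    	if (!page)
    		return 1;
    	free(page);
    	return 0;
    }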
......
...@@ -53,7 +53,7 @@ extern "C" { ...@@ -53,7 +53,7 @@ extern "C" {
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3 #define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
#define DRM_IVPU_PARAM_NUM_CONTEXTS 4 #define DRM_IVPU_PARAM_NUM_CONTEXTS 4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5 #define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 #define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID 7 #define DRM_IVPU_PARAM_CONTEXT_ID 7
#define DRM_IVPU_PARAM_FW_API_VERSION 8 #define DRM_IVPU_PARAM_FW_API_VERSION 8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9 #define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
...@@ -64,11 +64,18 @@ extern "C" { ...@@ -64,11 +64,18 @@ extern "C" {
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0 #define DRM_IVPU_PLATFORM_TYPE_SILICON 0
/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0 #define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1 #define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2 #define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3 #define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0
#define DRM_IVPU_JOB_PRIORITY_IDLE 1
#define DRM_IVPU_JOB_PRIORITY_NORMAL 2
#define DRM_IVPU_JOB_PRIORITY_FOCUS 3
#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
/** /**
* DRM_IVPU_CAP_METRIC_STREAMER * DRM_IVPU_CAP_METRIC_STREAMER
* *
...@@ -112,10 +119,6 @@ struct drm_ivpu_param { ...@@ -112,10 +119,6 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS: * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
* Lowest VPU virtual address available in the current context (read-only) * Lowest VPU virtual address available in the current context (read-only)
* *
* %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
* Value of current context scheduling priority (read-write).
* See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
*
* %DRM_IVPU_PARAM_CONTEXT_ID: * %DRM_IVPU_PARAM_CONTEXT_ID:
* Current context ID, always greater than 0 (read-only) * Current context ID, always greater than 0 (read-only)
* *
...@@ -286,6 +289,18 @@ struct drm_ivpu_submit { ...@@ -286,6 +289,18 @@ struct drm_ivpu_submit {
* to be executed. The offset has to be 8-byte aligned. * to be executed. The offset has to be 8-byte aligned.
*/ */
__u32 commands_offset; __u32 commands_offset;
/**
* @priority:
*
* Priority to be set for related job command queue, can be one of the following:
* %DRM_IVPU_JOB_PRIORITY_DEFAULT
* %DRM_IVPU_JOB_PRIORITY_IDLE
* %DRM_IVPU_JOB_PRIORITY_NORMAL
* %DRM_IVPU_JOB_PRIORITY_FOCUS
* %DRM_IVPU_JOB_PRIORITY_REALTIME
*/
__u32 priority;
}; };
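With the new @priority field, userspace selects a command-queue priority per submission instead of per context (the old DRM_IVPU_PARAM_CONTEXT_PRIORITY is deprecated above). A hedged sketch of a submission using it, assuming the updated uapi header from this series (buffer creation and command patching elided; submit_focus() is hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    #include <drm/ivpu_accel.h>	/* assumes the updated header from this series */

    /* Submit one pre-built command buffer at FOCUS priority (illustrative only). */
    static int submit_focus(int fd, __u32 bo_handle, __u32 cmds_offset)
    {
    	struct drm_ivpu_submit args;
    	__u32 handles[1] = { bo_handle };

    	memset(&args, 0, sizeof(args));
    	args.buffers_ptr = (uintptr_t)handles;
    	args.buffer_count = 1;
    	args.engine = DRM_IVPU_ENGINE_COMPUTE;
    	args.commands_offset = cmds_offset;
    	args.priority = DRM_IVPU_JOB_PRIORITY_FOCUS;

    	return ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &args);
    }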
/* drm_ivpu_bo_wait job status codes */ /* drm_ivpu_bo_wait job status codes */
......