Commit beaa71d6 authored by Linus Torvalds

Merge tag 'drm-fixes-2023-08-25' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "A bit bigger than I'd care for, but it's mostly a single vmwgfx fix
  and a fix for an i915 hotplug probing issue. Otherwise misc i915, bridge,
  panfrost and dma-buf fixes.

  core:
   - add a HPD poll helper

  i915:
   - fix regression in i915 polling
   - fix docs build warning
   - fix DG2 idle power consumption

  bridge:
   - samsung-dsim: init fix

  panfrost:
   - fix speed binning issue

  dma-buf:
   - fix recursive lock in fence signal

  vmwgfx:
   - fix shader stage validation
   - fix NULL ptr derefs in gem put"

* tag 'drm-fixes-2023-08-25' of git://anongit.freedesktop.org/drm/drm:
  drm/i915: Fix HPD polling, reenabling the output poll work as needed
  drm: Add an HPD poll helper to reschedule the poll work
  drm/vmwgfx: Fix possible invalid drm gem put calls
  drm/vmwgfx: Fix shader stage validation
  dma-buf/sw_sync: Avoid recursive lock during fence signal
  drm/i915: fix Sphinx indentation warning
  drm/i915/dgfx: Enable d3cold at s2idle
  drm/display/dp: Fix the DP DSC Receiver cap size
  drm/panfrost: Skip speed binning on EOPNOTSUPP
  drm: bridge: samsung-dsim: Fix init during host transfer
parents 4f9e7fab 59fe2029
......@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
*/
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
LIST_HEAD(signalled);
struct sync_pt *pt, *next;
trace_sync_timeline(obj);
......@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
if (!timeline_fence_signaled(&pt->base))
break;
list_del_init(&pt->link);
dma_fence_get(&pt->base);
list_move_tail(&pt->link, &signalled);
rb_erase(&pt->node, &obj->pt_tree);
/*
* A signal callback may release the last reference to this
* fence, causing it to be freed. That operation has to be
* last to avoid a use after free inside this loop, and must
* be after we remove the fence from the timeline in order to
* prevent deadlocking on timeline->lock inside
* timeline_fence_release().
*/
dma_fence_signal_locked(&pt->base);
}
spin_unlock_irq(&obj->lock);
list_for_each_entry_safe(pt, next, &signalled, link) {
list_del_init(&pt->link);
dma_fence_put(&pt->base);
}
}
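
The hunk above moves the final dma_fence_put() out of the obj->lock critical section, because dropping the last reference calls timeline_fence_release(), which takes the same lock again. A minimal sketch of that deferred-release pattern, using a hypothetical item type and release callback rather than the sw_sync code itself:

/* Sketch of the deferred-release pattern (hypothetical types). */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct kref ref;
	struct list_head link;
};

/* Release callback; assume it may take the owner's lock itself. */
static void item_release(struct kref *ref)
{
	kfree(container_of(ref, struct item, ref));
}

static void retire_items(spinlock_t *lock, struct list_head *active)
{
	LIST_HEAD(done);
	struct item *it, *next;

	spin_lock_irq(lock);
	/* Detach the finished items while holding the lock... */
	list_for_each_entry_safe(it, next, active, link)
		list_move_tail(&it->link, &done);
	spin_unlock_irq(lock);

	/* ...but drop the list's references only after unlocking, so a
	 * release callback that takes the same lock cannot deadlock.
	 */
	list_for_each_entry_safe(it, next, &done, link) {
		list_del_init(&it->link);
		kref_put(&it->ref, item_release);
	}
}
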
/**
......
......@@ -1386,6 +1386,18 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
disable_irq(dsi->irq);
}
static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
{
u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
if (enable)
reg |= DSIM_FORCE_STOP_STATE;
else
reg &= ~DSIM_FORCE_STOP_STATE;
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
}
static int samsung_dsim_init(struct samsung_dsim *dsi)
{
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
......@@ -1445,15 +1457,12 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
u32 reg;
if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
samsung_dsim_set_display_mode(dsi);
samsung_dsim_set_display_enable(dsi, true);
} else {
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
reg &= ~DSIM_FORCE_STOP_STATE;
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
samsung_dsim_set_stop_state(dsi, false);
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
......@@ -1463,16 +1472,12 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
u32 reg;
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
reg |= DSIM_FORCE_STOP_STATE;
samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
}
if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
samsung_dsim_set_stop_state(dsi, true);
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
......@@ -1775,6 +1780,8 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
if (ret)
return ret;
samsung_dsim_set_stop_state(dsi, false);
ret = mipi_dsi_create_packet(&xfer.packet, msg);
if (ret < 0)
return ret;
......
......@@ -262,6 +262,26 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
}
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void reschedule_output_poll_work(struct drm_device *dev)
{
unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
if (dev->mode_config.delayed_event)
/*
* FIXME:
*
* Use short (1s) delay to handle the initial delayed event.
* This delay should not be needed, but Optimus/nouveau will
* fail in a mysterious way if the delayed event is handled as
* soon as possible like it is done in
* drm_helper_probe_single_connector_modes() in case the poll
* was enabled before.
*/
delay = HZ;
schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
}
/**
* drm_kms_helper_poll_enable - re-enable output polling.
* @dev: drm_device
......@@ -279,37 +299,41 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
dev->mode_config.poll_running)
return;
poll = drm_kms_helper_enable_hpd(dev);
if (dev->mode_config.delayed_event) {
/*
* FIXME:
*
* Use short (1s) delay to handle the initial delayed event.
* This delay should not be needed, but Optimus/nouveau will
* fail in a mysterious way if the delayed event is handled as
* soon as possible like it is done in
* drm_helper_probe_single_connector_modes() in case the poll
* was enabled before.
*/
poll = true;
delay = HZ;
}
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
if (drm_kms_helper_enable_hpd(dev) ||
dev->mode_config.delayed_event)
reschedule_output_poll_work(dev);
dev->mode_config.poll_running = true;
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
/**
* drm_kms_helper_poll_reschedule - reschedule the output polling work
* @dev: drm_device
*
* This function reschedules the output polling work, after polling for a
* connector has been enabled.
*
* Drivers must call this helper after enabling polling for a connector by
* setting %DRM_CONNECTOR_POLL_CONNECT / %DRM_CONNECTOR_POLL_DISCONNECT flags
* in drm_connector::polled. Note that clearing these flags for a connector will
* stop the output polling work automatically if polling is disabled for all
* other connectors as well.
*
* The function can be called only after polling has been enabled by calling
* drm_kms_helper_poll_init() / drm_kms_helper_poll_enable().
*/
void drm_kms_helper_poll_reschedule(struct drm_device *dev)
{
if (dev->mode_config.poll_running)
reschedule_output_poll_work(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_reschedule);
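
Per the kerneldoc above, a driver that starts polling a connector at runtime sets the poll flags and then kicks the poll work; roughly as follows (hypothetical driver code, not part of this series):

/* Hypothetical driver snippet showing the intended call sequence. */
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_probe_helper.h>

static void example_enable_connector_polling(struct drm_device *dev,
					     struct drm_connector *connector)
{
	mutex_lock(&dev->mode_config.mutex);
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;
	mutex_unlock(&dev->mode_config.mutex);

	/* No-op unless polling was already enabled via
	 * drm_kms_helper_poll_init()/drm_kms_helper_poll_enable().
	 */
	drm_kms_helper_poll_reschedule(dev);
}
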
static enum drm_connector_status
drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
{
......
......@@ -211,7 +211,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_enable(&dev_priv->drm);
drm_kms_helper_poll_reschedule(&dev_priv->drm);
mod_delayed_work(dev_priv->unordered_wq,
&dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
......@@ -649,7 +649,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
if (enabled)
drm_kms_helper_poll_enable(&dev_priv->drm);
drm_kms_helper_poll_reschedule(&dev_priv->drm);
mutex_unlock(&dev_priv->drm.mode_config.mutex);
......
......@@ -26,6 +26,7 @@
* The kernel driver is only responsible for loading the HuC firmware and
* triggering its security authentication. This is done differently depending
* on the platform:
*
* - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
* and the authentication via GuC
* - DG2: load and authentication are both performed via GSC.
......@@ -33,6 +34,7 @@
* not-DG2 older platforms), while the authentication is done in 2-steps,
* a first auth for clear-media workloads via GuC and a second one for all
* workloads via GSC.
*
* On platforms where the GuC does the authentication, to correctly do so the
* HuC binary must be loaded before the GuC one.
* Loading the HuC is optional; however, not using the HuC might negatively
......
......@@ -443,7 +443,6 @@ static int i915_pcode_init(struct drm_i915_private *i915)
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
int ret;
if (i915_inject_probe_failure(dev_priv))
......@@ -557,15 +556,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_bw_init_hw(dev_priv);
/*
* FIXME: Temporary hammer to avoid freezing the machine on our DGFX
* This should be totally removed when we handle the pci states properly
* on runtime PM and on s2idle cases.
*/
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
pci_d3cold_disable(root_pdev);
return 0;
err_opregion:
......@@ -591,7 +581,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
i915_perf_fini(dev_priv);
......@@ -599,10 +588,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
pci_d3cold_enable(root_pdev);
}
/**
......@@ -1517,6 +1502,8 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
struct intel_gt *gt;
int ret, i;
......@@ -1568,6 +1555,15 @@ static int intel_runtime_suspend(struct device *kdev)
drm_err(&dev_priv->drm,
"Unclaimed access detected prior to suspending\n");
/*
* FIXME: Temporary hammer to avoid freezing the machine on our DGFX
* This should be totally removed when we handle the pci states properly
* on runtime PM.
*/
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
pci_d3cold_disable(root_pdev);
rpm->suspended = true;
/*
......@@ -1606,6 +1602,8 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
struct intel_gt *gt;
int ret, i;
......@@ -1619,6 +1617,11 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
pci_d3cold_enable(root_pdev);
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
......
......@@ -96,7 +96,7 @@ static int panfrost_read_speedbin(struct device *dev)
* keep going without it; any other error means that we are
* supposed to read the bin value, but we failed doing so.
*/
if (ret != -ENOENT) {
if (ret != -ENOENT && ret != -EOPNOTSUPP) {
DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
return ret;
}
......
......@@ -497,10 +497,9 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
if (!(flags & drm_vmw_synccpu_allow_cs)) {
atomic_dec(&vmw_bo->cpu_writers);
}
ttm_bo_put(&vmw_bo->tbo);
vmw_user_bo_unref(vmw_bo);
}
drm_gem_object_put(&vmw_bo->tbo.base);
return ret;
}
......@@ -540,8 +539,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
return ret;
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
drm_gem_object_put(&vbo->tbo.base);
vmw_user_bo_unref(vbo);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
......
......@@ -195,6 +195,14 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
return buf;
}
static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
{
if (vbo) {
ttm_bo_put(&vbo->tbo);
drm_gem_object_put(&vbo->tbo.base);
}
}
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
return container_of((gobj), struct vmw_bo, tbo.base);
......
......@@ -1513,4 +1513,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw)
return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
}
static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
u32 shader_type)
{
SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
if (shader_model >= VMW_SM_5)
max_allowed = SVGA3D_SHADERTYPE_MAX;
else if (shader_model >= VMW_SM_4)
max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
}
#endif
......@@ -1164,8 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->tbo);
drm_gem_object_put(&vmw_bo->tbo.base);
vmw_user_bo_unref(vmw_bo);
if (unlikely(ret != 0))
return ret;
......@@ -1221,8 +1220,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->tbo);
drm_gem_object_put(&vmw_bo->tbo.base);
vmw_user_bo_unref(vmw_bo);
if (unlikely(ret != 0))
return ret;
......@@ -1992,7 +1990,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
return -EINVAL;
......@@ -2115,8 +2113,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
......@@ -2133,6 +2129,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
(unsigned int) cmd->body.type,
(unsigned int) cmd->body.slot);
return -EINVAL;
}
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb;
......@@ -2141,14 +2145,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
binding.size = cmd->body.sizeInBytes;
binding.slot = cmd->body.slot;
if (binding.shader_slot >= max_shader_num ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
(unsigned int) cmd->body.type,
(unsigned int) binding.slot);
return -EINVAL;
}
vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
binding.slot);
......@@ -2207,15 +2203,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
container_of(header, typeof(*cmd), header);
SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
cmd->body.type >= max_allowed) {
!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
......@@ -2239,8 +2233,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
......@@ -2251,8 +2243,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= max_allowed ||
cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
return -EINVAL;
......
......@@ -1665,10 +1665,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo) {
vmw_bo_unreference(&bo);
drm_gem_object_put(&bo->tbo.base);
}
if (bo)
vmw_user_bo_unref(bo);
if (surface)
vmw_surface_unreference(&surface);
......
......@@ -451,8 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf);
drm_gem_object_put(&buf->tbo.base);
vmw_user_bo_unref(buf);
out_unlock:
mutex_unlock(&overlay->mutex);
......
......@@ -809,8 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
shader_type, num_input_sig,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_bo_unreference(&buffer);
drm_gem_object_put(&buffer->tbo.base);
vmw_user_bo_unref(buffer);
return ret;
}
......
......@@ -1537,7 +1537,7 @@ enum drm_dp_phy {
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
#define DP_LTTPR_COMMON_CAP_SIZE 8
......
......@@ -25,6 +25,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
void drm_kms_helper_poll_reschedule(struct drm_device *dev);
bool drm_kms_helper_is_poll_worker(void);
enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
......