Commit 30c60ba3 authored by Dave Airlie

Merge tag 'vmwgfx-drm-fixes-5.18-2022-05-13' of https://gitlab.freedesktop.org/zack/vmwgfx into drm-fixes

vmwgfx fixes for:
- Black screen due to fences using FIFO checks on SVGA3
- Random black screens on boot due to uninitialized drm_mode_fb_cmd2
- Hangs on SVGA3 due to command buffers being used with gbobjects
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Zack Rusin <zackr@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/a1d32799e4c74b8540216376d7576bb783ca07ba.camel@vmware.com
parents 5005e981 21d1d192
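
For orientation before the diff: the fence fixes below stop treating the FIFO capability word as the only source of fence support, because SVGA3 devices have no usable FIFO. The standalone sketch that follows condenses that decision; the capability bit values and the cut-down vmw_private stand-in are placeholders for illustration only, not the real vmwgfx definitions.

/*
 * Minimal sketch of the capability decision the diff below encodes.
 * Bit values and the struct layout here are illustrative placeholders;
 * the real definitions live in the vmwgfx/SVGA headers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SVGA_CAP_COMMAND_BUFFERS (1u << 0)  /* placeholder bit */
#define SVGA_CAP_CMD_BUFFERS_2   (1u << 1)  /* placeholder bit */
#define SVGA_FIFO_CAP_FENCE      (1u << 2)  /* placeholder bit */

struct vmw_private {
        uint32_t capabilities;  /* device capability flags */
        uint32_t fifo_caps;     /* stands in for vmw_fifo_caps(vmw) */
};

/*
 * Mirrors the logic of the new vmw_has_fences(): devices with command
 * buffers (SVGA3 has no usable FIFO) count as having fences; otherwise
 * fall back to the legacy FIFO fence capability.
 */
static bool vmw_has_fences(const struct vmw_private *vmw)
{
        if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
                                  SVGA_CAP_CMD_BUFFERS_2)) != 0)
                return true;
        return (vmw->fifo_caps & SVGA_FIFO_CAP_FENCE) != 0;
}

int main(void)
{
        struct vmw_private svga3  = { .capabilities = SVGA_CAP_CMD_BUFFERS_2 };
        struct vmw_private legacy = { .fifo_caps = SVGA_FIFO_CAP_FENCE };

        printf("SVGA3: %d, legacy FIFO: %d\n",
               vmw_has_fences(&svga3), vmw_has_fences(&legacy));
        return 0;
}
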
drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -528,7 +528,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
 	} while (*seqno == 0);
 
-	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
+	if (!vmw_has_fences(dev_priv)) {
 
 		/*
 		 * Don't request hardware to send a fence. The
@@ -675,11 +675,14 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
  */
 bool vmw_cmd_supported(struct vmw_private *vmw)
 {
-	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
-				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
-		return true;
+	bool has_cmdbufs =
+		(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+				      SVGA_CAP_CMD_BUFFERS_2)) != 0;
+	if (vmw_is_svga_v3(vmw))
+		return (has_cmdbufs &&
+			(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
 	/*
 	 * We have FIFO cmd's
 	 */
-	return vmw->fifo_mem != NULL;
+	return has_cmdbufs || vmw->fifo_mem != NULL;
 }
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1679,4 +1679,12 @@ static inline void vmw_irq_status_write(struct vmw_private *vmw,
 	outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
 }
 
+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
+		return true;
+	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
+
 #endif
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -483,7 +483,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 
 static int vmw_fb_kms_framebuffer(struct fb_info *info)
 {
-	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
 	struct vmw_fb_par *par = info->par;
 	struct fb_var_screeninfo *var = &info->var;
 	struct drm_framebuffer *cur_fb;
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -82,6 +82,22 @@ fman_from_fence(struct vmw_fence_obj *fence)
 	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
 }
 
+static u32 vmw_fence_goal_read(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
+	else
+		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
+}
+
+static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
+	else
+		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
+}
+
 /*
  * Note on fencing subsystem usage of irqs:
  * Typically the vmw_fences_update function is called
@@ -392,7 +408,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 	if (likely(!fman->seqno_valid))
 		return false;
 
-	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
 	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
 		return false;
 
@@ -400,9 +416,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 	list_for_each_entry(fence, &fman->fence_list, head) {
 		if (!list_empty(&fence->seq_passed_actions)) {
 			fman->seqno_valid = true;
-			vmw_fifo_mem_write(fman->dev_priv,
-					   SVGA_FIFO_FENCE_GOAL,
-					   fence->base.seqno);
+			vmw_fence_goal_write(fman->dev_priv,
+					     fence->base.seqno);
 			break;
 		}
 	}
@@ -434,13 +449,12 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 	if (dma_fence_is_signaled_locked(&fence->base))
 		return false;
 
-	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
 	if (likely(fman->seqno_valid &&
 		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
 		return false;
 
-	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
-			   fence->base.seqno);
+	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
 	fman->seqno_valid = true;
 
 	return true;
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -32,6 +32,14 @@
 
 #define VMW_FENCE_WRAP (1 << 24)
 
+static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		return SVGA_IRQFLAG_REG_FENCE_GOAL;
+	else
+		return SVGA_IRQFLAG_FENCE_GOAL;
+}
+
 /**
  * vmw_thread_fn - Deferred (process context) irq handler
  *
@@ -96,7 +104,7 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
 		wake_up_all(&dev_priv->fifo_queue);
 
 	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
-			      SVGA_IRQFLAG_FENCE_GOAL)) &&
+			      vmw_irqflag_fence_goal(dev_priv))) &&
 	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
 		ret = IRQ_WAKE_THREAD;
 
@@ -137,8 +145,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
 	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return true;
 
-	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
-	    vmw_fifo_idle(dev_priv, seqno))
+	if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
 		return true;
 
 	/**
@@ -160,6 +167,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 			unsigned long timeout)
 {
 	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
+	bool fifo_down = false;
 
 	uint32_t count = 0;
 	uint32_t signal_seq;
@@ -176,12 +184,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	 */
 
 	if (fifo_idle) {
-		down_read(&fifo_state->rwsem);
 		if (dev_priv->cman) {
 			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
 					      10*HZ);
 			if (ret)
 				goto out_err;
+		} else if (fifo_state) {
+			down_read(&fifo_state->rwsem);
+			fifo_down = true;
 		}
 	}
 
@@ -218,12 +228,12 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 		}
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
-	if (ret == 0 && fifo_idle)
+	if (ret == 0 && fifo_idle && fifo_state)
 		vmw_fence_write(dev_priv, signal_seq);
 
 	wake_up_all(&dev_priv->fence_queue);
 out_err:
-	if (fifo_idle)
+	if (fifo_down)
 		up_read(&fifo_state->rwsem);
 
 	return ret;
@@ -266,13 +276,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+	vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
 			       &dev_priv->goal_queue_waiters);
 }
 
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+	vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
 				  &dev_priv->goal_queue_waiters);
 }
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1344,7 +1344,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
 						      mode_cmd,
 						      is_bo_proxy);
-
 		/*
 		 * vmw_create_bo_proxy() adds a reference that is no longer
 		 * needed
@@ -1385,13 +1384,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	ret = vmw_user_lookup_handle(dev_priv, file_priv,
 				     mode_cmd->handles[0],
 				     &surface, &bo);
-	if (ret)
+	if (ret) {
+		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
+			  mode_cmd->handles[0], mode_cmd->handles[0]);
 		goto err_out;
+	}
 
 	if (!bo &&
 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
-		DRM_ERROR("Surface size cannot exceed %dx%d",
+		DRM_ERROR("Surface size cannot exceed %dx%d\n",
 			  dev_priv->texture_max_width,
 			  dev_priv->texture_max_height);
 		goto err_out;