Commit d0baf921 authored by Dave Airlie

Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next

One fix for a regression in 4.3, and one irq locking rework.

* 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Relax irq locking somewhat
  drm/vmwgfx: Properly flush cursor updates and page-flips
parents e02328f4 d2e8851a
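The locking rework in the hunks below comes down to one pattern: each interrupt reason gets a plain waiter count, and both the counts and the cached irq_mask are protected by the single bottom-half-safe waiter_lock, so the separate irq_lock can be dropped and the interrupt handler only needs a READ_ONCE() of the mask. A minimal sketch of that pattern, distilled from the diff below (names and register accessors are the driver's own; error handling and callers are omitted):

	/* First waiter unmasks the interrupt, last waiter masks it again.
	 * waiter_lock protects both *waiter_count and dev_priv->irq_mask. */
	void vmw_generic_waiter_add(struct vmw_private *dev_priv,
				    u32 flag, int *waiter_count)
	{
		spin_lock_bh(&dev_priv->waiter_lock);
		if ((*waiter_count)++ == 0) {
			/* Ack any stale pending status before unmasking. */
			outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
			dev_priv->irq_mask |= flag;
			vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		}
		spin_unlock_bh(&dev_priv->waiter_lock);
	}

	void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				       u32 flag, int *waiter_count)
	{
		spin_lock_bh(&dev_priv->waiter_lock);
		if (--(*waiter_count) == 0) {
			dev_priv->irq_mask &= ~flag;
			vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		}
		spin_unlock_bh(&dev_priv->waiter_lock);
	}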
@@ -643,7 +643,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	dev_priv->fence_queue_waiters = 0;
-	atomic_set(&dev_priv->fifo_queue_waiters, 0);
+	dev_priv->fifo_queue_waiters = 0;
 	dev_priv->used_memory_size = 0;
......
@@ -440,13 +440,12 @@ struct vmw_private {
 	spinlock_t waiter_lock;
 	int fence_queue_waiters; /* Protected by waiter_lock */
 	int goal_queue_waiters; /* Protected by waiter_lock */
-	int cmdbuf_waiters; /* Protected by irq_lock */
-	int error_waiters; /* Protected by irq_lock */
-	atomic_t fifo_queue_waiters;
+	int cmdbuf_waiters; /* Protected by waiter_lock */
+	int error_waiters; /* Protected by waiter_lock */
+	int fifo_queue_waiters; /* Protected by waiter_lock */
 	uint32_t last_read_seqno;
-	spinlock_t irq_lock;
 	struct vmw_fence_manager *fman;
-	uint32_t irq_mask;
+	uint32_t irq_mask; /* Updates protected by waiter_lock */
 	/*
 	 * Device state
......
@@ -252,7 +252,6 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 			 unsigned long timeout)
 {
 	long ret = 1L;
-	unsigned long irq_flags;
 	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
 		return 0;
@@ -262,16 +261,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 		return vmw_fifo_wait_noirq(dev_priv, bytes,
 					   interruptible, timeout);
-	spin_lock(&dev_priv->waiter_lock);
-	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
-		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
+			       &dev_priv->fifo_queue_waiters);
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
@@ -287,14 +278,8 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 	else if (likely(ret > 0))
 		ret = 0;
-	spin_lock(&dev_priv->waiter_lock);
-	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
+				  &dev_priv->fifo_queue_waiters);
 	return ret;
 }
......
@@ -36,15 +36,13 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t status, masked_status;
-	spin_lock(&dev_priv->irq_lock);
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-	masked_status = status & dev_priv->irq_mask;
-	spin_unlock(&dev_priv->irq_lock);
+	masked_status = status & READ_ONCE(dev_priv->irq_mask);
 	if (likely(status))
 		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-	if (!masked_status)
+	if (!status)
 		return IRQ_NONE;
 	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
@@ -190,65 +188,51 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	return ret;
 }
-void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+			    u32 flag, int *waiter_count)
 {
-	spin_lock(&dev_priv->waiter_lock);
-	if (dev_priv->fence_queue_waiters++ == 0) {
-		unsigned long irq_flags;
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		outl(SVGA_IRQFLAG_ANY_FENCE,
-		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
+	spin_lock_bh(&dev_priv->waiter_lock);
+	if ((*waiter_count)++ == 0) {
+		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+		dev_priv->irq_mask |= flag;
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	spin_unlock(&dev_priv->waiter_lock);
+	spin_unlock_bh(&dev_priv->waiter_lock);
 }
-void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+			       u32 flag, int *waiter_count)
 {
-	spin_lock(&dev_priv->waiter_lock);
-	if (--dev_priv->fence_queue_waiters == 0) {
-		unsigned long irq_flags;
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
+	spin_lock_bh(&dev_priv->waiter_lock);
+	if (--(*waiter_count) == 0) {
+		dev_priv->irq_mask &= ~flag;
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
 	}
-	spin_unlock(&dev_priv->waiter_lock);
+	spin_unlock_bh(&dev_priv->waiter_lock);
 }
+void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+{
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+			       &dev_priv->fence_queue_waiters);
+}
+void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+{
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+				  &dev_priv->fence_queue_waiters);
+}
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-	spin_lock(&dev_priv->waiter_lock);
-	if (dev_priv->goal_queue_waiters++ == 0) {
-		unsigned long irq_flags;
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		outl(SVGA_IRQFLAG_FENCE_GOAL,
-		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+			       &dev_priv->goal_queue_waiters);
 }
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-	spin_lock(&dev_priv->waiter_lock);
-	if (--dev_priv->goal_queue_waiters == 0) {
-		unsigned long irq_flags;
-		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-	}
-	spin_unlock(&dev_priv->waiter_lock);
+	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+				  &dev_priv->goal_queue_waiters);
 }
 int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -305,7 +289,6 @@ void vmw_irq_preinstall(struct drm_device *dev)
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
 		return;
-	spin_lock_init(&dev_priv->irq_lock);
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
@@ -328,30 +311,3 @@ void vmw_irq_uninstall(struct drm_device *dev)
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
-void vmw_generic_waiter_add(struct vmw_private *dev_priv,
-			    u32 flag, int *waiter_count)
-{
-	unsigned long irq_flags;
-	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-	if ((*waiter_count)++ == 0) {
-		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
-		dev_priv->irq_mask |= flag;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-}
-void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
-			       u32 flag, int *waiter_count)
-{
-	unsigned long irq_flags;
-	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
-	if (--(*waiter_count) == 0) {
-		dev_priv->irq_mask &= ~flag;
-		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
-}
@@ -78,7 +78,7 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
 	cmd->cursor.hotspotX = hotspotX;
 	cmd->cursor.hotspotY = hotspotY;
-	vmw_fifo_commit(dev_priv, cmd_size);
+	vmw_fifo_commit_flush(dev_priv, cmd_size);
 	return 0;
 }
......
@@ -717,6 +717,8 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
 					   &event->event.tv_usec,
 					   true);
 		vmw_fence_obj_unreference(&fence);
+	} else {
+		vmw_fifo_flush(dev_priv, false);
 	}
 	return ret;
......
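The last two hunks carry the "Properly flush cursor updates and page-flips" fix: the cursor command is now committed with vmw_fifo_commit_flush(), and a page flip that was not asked to deliver a completion event calls vmw_fifo_flush() explicitly, presumably so the queued command reaches the device promptly instead of waiting for some later, unrelated flush (the 4.3 regression mentioned in the merge message). A hedged sketch of the resulting page-flip tail, keeping only the if (event) structure visible in the hunk above (the fence/event path is abbreviated, not quoted in full):

	/* Sketch only: tail of the flip path after the flip command has
	 * been queued.  With an event, the fence machinery delivers the
	 * completion; without one, flush the FIFO explicitly. */
	if (event) {
		/* fence created and queued for delivery to userspace
		 * (context lines in the hunk above) */
		vmw_fence_obj_unreference(&fence);
	} else {
		vmw_fifo_flush(dev_priv, false);
	}

	return ret;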