Commit 85b9e487 authored by Thomas Hellstrom, committed by Dave Airlie

drm/vmwgfx: Fix a circular locking dependency bug.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent a87897ed
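The "circular locking dependency" in the title is the lock-ordering bug class that lockdep reports when two code paths take the same pair of locks in opposite orders, so each can end up waiting on a lock the other holds. A minimal, purely hypothetical userspace illustration of the pattern (pthreads stand in for the kernel locks; these are not the vmwgfx call paths):

```c
/*
 * Generic AB-BA illustration of a circular locking dependency.  Purely
 * hypothetical: thread_a acquires A then B, thread_b acquires B then A,
 * so each may block on the lock the other already holds.
 */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *thread_a(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_a);	/* A ...                 */
	pthread_mutex_lock(&lock_b);	/* ... then B            */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *thread_b(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_b);	/* B ...                 */
	pthread_mutex_lock(&lock_a);	/* ... then A: inverted  */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t ta, tb;

	/* May deadlock depending on timing; that is the point of the example. */
	pthread_create(&ta, NULL, thread_a, NULL);
	pthread_create(&tb, NULL, thread_b, NULL);
	pthread_join(ta, NULL);
	pthread_join(tb, NULL);
	return 0;
}
```

The hunks below break such a cycle by shrinking what each lock covers: a new fifo_mutex serializes FIFO reservation, rwsem is write-held only briefly inside vmw_fifo_commit(), and fence_seq becomes an atomic_t that the fence paths read without taking any lock.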
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
+	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 };
@@ -213,7 +214,7 @@ struct vmw_private {
 	 * Fencing and IRQs.
 	 */
 
-	uint32_t fence_seq;
+	atomic_t fence_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
 	atomic_t fence_queue_waiters;
...
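The two header hunks above add a dedicated fifo_mutex alongside the existing rwsem in struct vmw_fifo_state and turn vmw_private's fence_seq from a plain uint32_t into an atomic_t, so the sequence counter can be read and advanced without holding a lock. A rough userspace analogue of the resulting state and its initialization (C11 atomics and pthread primitives as stand-ins for the kernel types; the struct and helper names are illustrative, not the driver's code):

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Userspace analogue of the touched vmw_fifo_state / vmw_private fields. */
struct fifo_state {
	unsigned long	 static_buffer_size;
	bool		 using_bounce_buffer;
	uint32_t	 capabilities;
	pthread_mutex_t	 fifo_mutex;	/* serializes reserve()..commit()    */
	pthread_rwlock_t rwsem;		/* stand-in for struct rw_semaphore  */
};

struct device_state {
	atomic_uint	 fence_seq;	/* stand-in for atomic_t fence_seq:  */
					/* readable without taking any lock  */
};

/* Mirrors the mutex_init()/init_rwsem()/atomic_set() calls in the hunks below. */
static void fifo_state_init(struct fifo_state *fifo, struct device_state *dev,
			    uint32_t last_read_sequence)
{
	pthread_mutex_init(&fifo->fifo_mutex, NULL);
	pthread_rwlock_init(&fifo->rwsem, NULL);
	atomic_store(&dev->fence_seq, last_read_sequence);
}
```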
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	fifo->reserved_size = 0;
 	fifo->using_bounce_buffer = false;
 
+	mutex_init(&fifo->fifo_mutex);
 	init_rwsem(&fifo->rwsem);
 
 	/*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
 
-	dev_priv->fence_seq = dev_priv->last_read_sequence;
+	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
 	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
 
 	return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 	int ret;
 
-	down_write(&fifo_state->rwsem);
+	mutex_lock(&fifo_state->fifo_mutex);
 	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 	}
 out_err:
 	fifo_state->reserved_size = 0;
-	up_write(&fifo_state->rwsem);
+	mutex_unlock(&fifo_state->fifo_mutex);
 	return NULL;
 }
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	}
 
+	down_write(&fifo_state->rwsem);
 	if (fifo_state->using_bounce_buffer || reserveable) {
 		next_cmd += bytes;
 		if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	if (reserveable)
 		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
 	mb();
-	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	up_write(&fifo_state->rwsem);
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	mutex_unlock(&fifo_state->fifo_mutex);
 }
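With these fifo.c hunks applied, vmw_fifo_reserve() takes fifo_mutex and keeps it held across the reservation (dropping it only on the error path), while vmw_fifo_commit() write-locks rwsem just around the FIFO pointer update, releases it, pings the host, and finally drops the mutex. Continuing the userspace stand-ins from the sketch above, roughly (the bodies are placeholders, not the driver's logic):

```c
#include <pthread.h>
#include <stdint.h>

static char cmd_bounce[4096];			/* stand-in command buffer */

/* Reserve: enter with no locks, return with fifo_mutex held on success. */
static void *fifo_reserve(struct fifo_state *fifo, uint32_t bytes)
{
	pthread_mutex_lock(&fifo->fifo_mutex);
	if (bytes > sizeof(cmd_bounce)) {
		/* Error path: drop the lock, as the out_err label does. */
		pthread_mutex_unlock(&fifo->fifo_mutex);
		return NULL;
	}
	/* In the driver: read MAX/MIN/NEXT_CMD, choose a direct mapping
	 * or the bounce buffer. */
	return cmd_bounce;
}

/* Commit: write-lock rwsem only around the pointer update, then unwind. */
static void fifo_commit(struct fifo_state *fifo, uint32_t bytes)
{
	(void)bytes;
	pthread_rwlock_wrlock(&fifo->rwsem);
	/* In the driver: advance NEXT_CMD, clear SVGA_FIFO_RESERVED, mb(). */
	pthread_rwlock_unlock(&fifo->rwsem);

	/* Ping the host here, after rwsem has been released. */
	pthread_mutex_unlock(&fifo->fifo_mutex);
}
```

The one lock order visible in the diff is fifo_mutex first, then rwsem, taken and released in strict nesting order across reserve and commit.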
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
-		down_write(&fifo_state->rwsem);
-		*sequence = dev_priv->fence_seq;
-		up_write(&fifo_state->rwsem);
+		*sequence = atomic_read(&dev_priv->fence_seq);
 		ret = -ENOMEM;
 		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
 					false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	}
 
 	do {
-		*sequence = dev_priv->fence_seq++;
+		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
 	} while (*sequence == 0);
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
...
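In vmw_fifo_send_fence(), the error path now just reads the counter with atomic_read() instead of bracketing it with down_write()/up_write(), and the sequence itself is allocated with atomic_add_return(), still skipping 0 on wrap. A userspace sketch of that lock-free allocation (C11 atomics as the stand-in; atomic_fetch_add returns the old value, so adding 1 reproduces atomic_add_return):

```c
#include <stdatomic.h>
#include <stdint.h>

static atomic_uint fence_seq;	/* stand-in for dev_priv->fence_seq */

/* Allocate the next non-zero fence sequence number, lock-free. */
static uint32_t fence_seq_next(void)
{
	uint32_t seq;

	do {
		/* atomic_add_return(1, ...) analogue: increment, read back new value. */
		seq = atomic_fetch_add(&fence_seq, 1u) + 1u;
	} while (seq == 0);	/* skip 0 on wrap-around, as the driver's loop does */

	return seq;
}
```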
@@ -84,20 +84,13 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
 	    vmw_fifo_idle(dev_priv, sequence))
 		return true;
 
-	/**
-	 * Below is to signal stale fences that have wrapped.
-	 * First, block fence submission.
-	 */
-
-	down_read(&fifo_state->rwsem);
-
 	/**
 	 * Then check if the sequence is higher than what we've actually
 	 * emitted. Then the fence is stale and signaled.
 	 */
 
-	ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
-	up_read(&fifo_state->rwsem);
+	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+	       > VMW_FENCE_WRAP);
 
 	return ret;
 }
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	if (fifo_idle)
 		down_read(&fifo_state->rwsem);
-	signal_seq = dev_priv->fence_seq;
+	signal_seq = atomic_read(&dev_priv->fence_seq);
 	ret = 0;
 
 	for (;;) {
...
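The vmw_fence_signaled() hunk drops the down_read()/up_read() pair entirely: the stale-fence test only needs a snapshot of fence_seq, and unsigned wrap-around arithmetic makes that snapshot safe to compare without a lock. A small sketch of the check, assuming a 32-bit sequence space and an illustrative window constant in place of VMW_FENCE_WRAP:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define FENCE_WRAP (1u << 24)	/* illustrative window, not necessarily the driver's value */

static atomic_uint fence_seq;	/* most recently emitted sequence (cf. the sketch above) */

/*
 * A fence is stale (and therefore treated as signaled) when its sequence
 * lies far behind the most recently emitted one.  Unsigned subtraction
 * keeps the comparison correct even after the 32-bit counter wraps.
 */
static bool fence_is_stale(uint32_t sequence)
{
	uint32_t emitted = atomic_load(&fence_seq);

	return (uint32_t)(emitted - sequence) > FENCE_WRAP;
}
```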