Commit a6ac1bc3 authored by Dave Airlie

drm/qxl: fix ioport interactions for kernel submitted commands.

So qxl has ioports, but it really, really, really doesn't want you
to write to them twice. If you write and then get a signal before
the irq arrives to let you know the command has completed, you have
to think ahead and avoid writing another time.

This works out fine for update area, where multiple writes aren't
the end of the world; with create primary surface, however, you
can't ever do multiple writes. So this stops internal kernel writes
from doing interruptible waits, because otherwise we have no idea
whether a write is a new one or a continuation of a previous one.

virtual hw sucks more than real hw.

This fixes lockups and VM crashes when resizing and starting/stopping
X.
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 95643359
@@ -277,7 +277,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
 	return 0;
 }
 
-static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
 {
 	int irq_num;
 	long addr = qdev->io_base + port;
@@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
 	mutex_lock(&qdev->async_io_mutex);
 	irq_num = atomic_read(&qdev->irq_received_io_cmd);
 	if (qdev->last_sent_io_cmd > irq_num) {
-		ret = wait_event_interruptible(qdev->io_cmd_event,
-					       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
-		if (ret)
+		if (intr)
+			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		else
+			ret = wait_event_timeout(qdev->io_cmd_event,
+						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		/* 0 is timeout, just bail the "hw" has gone away */
+		if (ret <= 0)
 			goto out;
 		irq_num = atomic_read(&qdev->irq_received_io_cmd);
 	}
 	outb(val, addr);
 	qdev->last_sent_io_cmd = irq_num + 1;
-	ret = wait_event_interruptible(qdev->io_cmd_event,
-				       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+	if (intr)
+		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+	else
+		ret = wait_event_timeout(qdev->io_cmd_event,
+					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
+	if (ret > 0)
+		ret = 0;
 	mutex_unlock(&qdev->async_io_mutex);
 	return ret;
 }
@@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
 	int ret;
 
 restart:
-	ret = wait_for_io_cmd_user(qdev, val, port);
+	ret = wait_for_io_cmd_user(qdev, val, port, false);
 	if (ret == -ERESTARTSYS)
 		goto restart;
 }
@@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
 	mutex_lock(&qdev->update_area_mutex);
 	qdev->ram_header->update_area = *area;
 	qdev->ram_header->update_surface = surface_id;
-	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
 	mutex_unlock(&qdev->update_area_mutex);
 	return ret;
 }
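For context, a minimal sketch of how the new intr flag divides the callers
(this is not part of the diff; the ram_header field setup below is an
illustrative assumption, while wait_for_io_cmd() and the port name
QXL_IO_CREATE_PRIMARY_ASYNC do come from the driver). A kernel-submitted
command such as primary-surface creation goes through the uninterruptible
wrapper, so a signal can never abort the wait and tempt a second outb()
for the same command:

	/* Sketch of a hypothetical kernel-internal caller of the
	 * uninterruptible path; field names are assumed, not copied
	 * from the driver. */
	static void example_io_create_primary(struct qxl_device *qdev,
					      unsigned int width,
					      unsigned int height)
	{
		struct qxl_surface_create *create =
			&qdev->ram_header->create_surface;	/* assumed layout */

		create->width = width;
		create->height = height;

		/* Kernel-submitted: wait_for_io_cmd() passes intr = false,
		 * so the wait cannot be broken by a signal. A restarted
		 * call here would mean a second write to the ioport, which
		 * create-primary cannot survive. */
		wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
	}

By contrast, qxl_io_update_area() in the last hunk passes intr = true: it
runs on behalf of userspace, may legitimately return -ERESTARTSYS, and a
repeated update-area write is harmless.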