Commit 3b7a2b24 authored by Jerome Glisse's avatar Jerome Glisse Committed by Dave Airlie

drm/radeon: rework fence handling, drop fence list v7

Using a 64-bit fence sequence we can directly compare sequence
numbers to know whether a fence is signaled or not. Thus the fence
list became useless, as did the fence lock that mainly
protected the fence list.

Things like ring.ready are no longer behind a lock, this should
be ok as ring.ready is initialized once and will only change
when facing lockup. Worst case is that we return an -EBUSY just
after a successful GPU reset, or we go into wait state instead
of returning -EBUSY (thus delaying reporting -EBUSY to fence
wait caller).

v2: Remove left over comment, force using writeback on cayman and
    newer, thus not having to suffer from possibly scratch reg
    exhaustion
v3: Rebase on top of change to uint64 fence patch
v4: Change DCE5 test to force write back on cayman and newer but
    also any APU such as PALM or SUMO family
v5: Rebase on top of new uint64 fence patch
v6: Just break if seq doesn't change any more. Use radeon_fence
    prefix for all function names. Even if it's now highly optimized,
    try to avoid polling too often.
v7: We should never poll the last_seq from the hardware without
    waking the sleeping threads, otherwise we might lose events.
Signed-off-by: default avatarJerome Glisse <jglisse@redhat.com>
Signed-off-by: default avatarChristian König <deathsimple@vodafone.de>
Signed-off-by: default avatarDave Airlie <airlied@redhat.com>
parent bb635567
...@@ -263,15 +263,12 @@ struct radeon_fence_driver { ...@@ -263,15 +263,12 @@ struct radeon_fence_driver {
atomic64_t last_seq; atomic64_t last_seq;
unsigned long last_activity; unsigned long last_activity;
wait_queue_head_t queue; wait_queue_head_t queue;
struct list_head emitted;
struct list_head signaled;
bool initialized; bool initialized;
}; };
struct radeon_fence { struct radeon_fence {
struct radeon_device *rdev; struct radeon_device *rdev;
struct kref kref; struct kref kref;
struct list_head list;
/* protected by radeon_fence.lock */ /* protected by radeon_fence.lock */
uint64_t seq; uint64_t seq;
/* RB, DMA, etc. */ /* RB, DMA, etc. */
...@@ -291,7 +288,7 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring); ...@@ -291,7 +288,7 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring); int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence); void radeon_fence_unref(struct radeon_fence **fence);
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring); unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
/* /*
* Tiling registers * Tiling registers
...@@ -1534,7 +1531,6 @@ struct radeon_device { ...@@ -1534,7 +1531,6 @@ struct radeon_device {
struct radeon_mode_info mode_info; struct radeon_mode_info mode_info;
struct radeon_scratch scratch; struct radeon_scratch scratch;
struct radeon_mman mman; struct radeon_mman mman;
rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
struct radeon_semaphore_driver semaphore_drv; struct radeon_semaphore_driver semaphore_drv;
struct mutex ring_lock; struct mutex ring_lock;
......
...@@ -225,9 +225,9 @@ int radeon_wb_init(struct radeon_device *rdev) ...@@ -225,9 +225,9 @@ int radeon_wb_init(struct radeon_device *rdev)
/* disable event_write fences */ /* disable event_write fences */
rdev->wb.use_event = false; rdev->wb.use_event = false;
/* disabled via module param */ /* disabled via module param */
if (radeon_no_wb == 1) if (radeon_no_wb == 1) {
rdev->wb.enabled = false; rdev->wb.enabled = false;
else { } else {
if (rdev->flags & RADEON_IS_AGP) { if (rdev->flags & RADEON_IS_AGP) {
/* often unreliable on AGP */ /* often unreliable on AGP */
rdev->wb.enabled = false; rdev->wb.enabled = false;
...@@ -237,8 +237,9 @@ int radeon_wb_init(struct radeon_device *rdev) ...@@ -237,8 +237,9 @@ int radeon_wb_init(struct radeon_device *rdev)
} else { } else {
rdev->wb.enabled = true; rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */ /* event_write fences are only available on r600+ */
if (rdev->family >= CHIP_R600) if (rdev->family >= CHIP_R600) {
rdev->wb.use_event = true; rdev->wb.use_event = true;
}
} }
} }
/* always use writeback/events on NI, APUs */ /* always use writeback/events on NI, APUs */
...@@ -731,7 +732,6 @@ int radeon_device_init(struct radeon_device *rdev, ...@@ -731,7 +732,6 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->gem.mutex); mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex); mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex); mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_lock);
rwlock_init(&rdev->semaphore_drv.lock); rwlock_init(&rdev->semaphore_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects); INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue); init_waitqueue_head(&rdev->irq.vblank_queue);
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment