Commit a8c05940 authored by Jerome Glisse, committed by Dave Airlie

drm/radeon: simplify semaphore handling v2

Directly use the suballocator to get small chunks of memory.
It's equally fast and doesn't crash when we encounter a GPU reset.

v2: rebased on new SA interface.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent c3b7fe8b
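
For context, a rough caller-side sketch of the flow this patch arrives at (illustrative only, not part of the diff: the helper example_sync_rings and its error handling are invented, and the ring locking that normally brackets the emit calls is omitted; only the radeon_semaphore_* signatures come from this patch):

/* Hypothetical caller sketch: sync one ring to another with the
 * simplified semaphore API. */
static int example_sync_rings(struct radeon_device *rdev,
			      int signal_ring, int wait_ring,
			      struct radeon_fence *wait_fence)
{
	struct radeon_semaphore *sem;
	int r;

	/* allocates an 8-byte chunk straight from the IB pool's
	 * suballocator instead of managing dedicated semaphore BOs */
	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	/* signal_ring signals, wait_ring stalls until the signal lands */
	radeon_semaphore_emit_signal(rdev, signal_ring, sem);
	radeon_semaphore_emit_wait(rdev, wait_ring, sem);

	/* the new fence argument lets the suballocator recycle the chunk
	 * only once the GPU is done with it; NULL frees it immediately */
	radeon_semaphore_free(rdev, sem, wait_fence);
	return 0;
}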
drivers/gpu/drm/radeon/evergreen.c
@@ -3550,7 +3550,6 @@ void evergreen_fini(struct radeon_device *rdev)
 	evergreen_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
drivers/gpu/drm/radeon/ni.c
@@ -1744,7 +1744,6 @@ void cayman_fini(struct radeon_device *rdev)
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
drivers/gpu/drm/radeon/r600.c
@@ -2658,7 +2658,6 @@ void r600_fini(struct radeon_device *rdev)
 	r600_vram_scratch_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
drivers/gpu/drm/radeon/radeon.h
@@ -434,34 +434,13 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
 /*
  * Semaphores.
  */
-struct radeon_ring;
-
-#define	RADEON_SEMAPHORE_BO_SIZE	256
-
-struct radeon_semaphore_driver {
-	rwlock_t		lock;
-	struct list_head	bo;
-};
-
-struct radeon_semaphore_bo;
-
 /* everything here is constant */
 struct radeon_semaphore {
-	struct list_head	list;
+	struct radeon_sa_bo	*sa_bo;
+	signed			waiters;
 	uint64_t		gpu_addr;
-	uint32_t		*cpu_ptr;
-	struct radeon_semaphore_bo	*bo;
 };
 
-struct radeon_semaphore_bo {
-	struct list_head	list;
-	struct radeon_ib	*ib;
-	struct list_head	free;
-	struct radeon_semaphore	semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
-	unsigned		nused;
-};
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev);
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore);
 void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
@@ -473,7 +452,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 				bool sync_to[RADEON_NUM_RINGS],
 				int dst_ring);
 void radeon_semaphore_free(struct radeon_device *rdev,
-			   struct radeon_semaphore *semaphore);
+			   struct radeon_semaphore *semaphore,
+			   struct radeon_fence *fence);
 
 /*
  * GART structures, functions & helpers
@@ -1540,7 +1520,6 @@ struct radeon_device {
 	struct radeon_mman		mman;
 	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
 	wait_queue_head_t		fence_queue;
-	struct radeon_semaphore_driver	semaphore_drv;
 	struct mutex			ring_lock;
 	struct radeon_ring		ring[RADEON_NUM_RINGS];
 	struct radeon_ib_pool		ib_pool;
drivers/gpu/drm/radeon/radeon_device.c
@@ -732,11 +732,9 @@ int radeon_device_init(struct radeon_device *rdev,
 	mutex_init(&rdev->gem.mutex);
 	mutex_init(&rdev->pm.mutex);
 	mutex_init(&rdev->vram_mutex);
-	rwlock_init(&rdev->semaphore_drv.lock);
 	INIT_LIST_HEAD(&rdev->gem.objects);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
 	init_waitqueue_head(&rdev->irq.idle_queue);
-	INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
 	/* initialize vm here */
 	rdev->vm_manager.use_bitmap = 1;
 	rdev->vm_manager.max_pfn = 1 << 20;
drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,7 +140,7 @@ static void radeon_fence_destroy(struct kref *kref)
 	fence = container_of(kref, struct radeon_fence, kref);
 	fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
 	if (fence->semaphore)
-		radeon_semaphore_free(fence->rdev, fence->semaphore);
+		radeon_semaphore_free(fence->rdev, fence->semaphore, NULL);
 	kfree(fence);
 }
drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -31,118 +31,40 @@
 #include "drm.h"
 #include "radeon.h"
 
-static int radeon_semaphore_add_bo(struct radeon_device *rdev)
-{
-	struct radeon_semaphore_bo *bo;
-	unsigned long irq_flags;
-	uint64_t gpu_addr;
-	uint32_t *cpu_ptr;
-	int r, i;
-
-	bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
-	if (bo == NULL) {
-		return -ENOMEM;
-	}
-	INIT_LIST_HEAD(&bo->free);
-	INIT_LIST_HEAD(&bo->list);
-	bo->nused = 0;
-
-	r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
-	if (r) {
-		dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
-		kfree(bo);
-		return r;
-	}
-	gpu_addr = radeon_sa_bo_gpu_addr(bo->ib->sa_bo);
-	cpu_ptr = radeon_sa_bo_cpu_addr(bo->ib->sa_bo);
-	for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
-		bo->semaphores[i].gpu_addr = gpu_addr;
-		bo->semaphores[i].cpu_ptr = cpu_ptr;
-		bo->semaphores[i].bo = bo;
-		list_add_tail(&bo->semaphores[i].list, &bo->free);
-		gpu_addr += 8;
-		cpu_ptr += 2;
-	}
-
-	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
-	list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
-	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-	return 0;
-}
-
-static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
-					   struct radeon_semaphore_bo *bo)
-{
-	radeon_sa_bo_free(rdev, &bo->ib->sa_bo, NULL);
-	radeon_fence_unref(&bo->ib->fence);
-	list_del(&bo->list);
-	kfree(bo);
-}
-
-void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
-{
-	struct radeon_semaphore_bo *bo, *n;
-
-	if (list_empty(&rdev->semaphore_drv.bo)) {
-		return;
-	}
-	/* only shrink if first bo has free semaphore */
-	bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
-	if (list_empty(&bo->free)) {
-		return;
-	}
-	list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
-		if (bo->nused)
-			continue;
-		radeon_semaphore_del_bo_locked(rdev, bo);
-	}
-}
-
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
-	struct radeon_semaphore_bo *bo;
-	unsigned long irq_flags;
-	bool do_retry = true;
 	int r;
 
-retry:
-	*semaphore = NULL;
-	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
-	list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
-		if (list_empty(&bo->free))
-			continue;
-		*semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
-		(*semaphore)->cpu_ptr[0] = 0;
-		(*semaphore)->cpu_ptr[1] = 0;
-		list_del(&(*semaphore)->list);
-		bo->nused++;
-		break;
-	}
-	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-
+	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
 	if (*semaphore == NULL) {
-		if (do_retry) {
-			do_retry = false;
-			r = radeon_semaphore_add_bo(rdev);
-			if (r)
-				return r;
-			goto retry;
-		}
 		return -ENOMEM;
 	}
-
+	r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+			     &(*semaphore)->sa_bo, 8, 8, true);
+	if (r) {
+		kfree(*semaphore);
+		*semaphore = NULL;
+		return r;
+	}
+	(*semaphore)->waiters = 0;
+	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
 	return 0;
 }
 
 void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
 				  struct radeon_semaphore *semaphore)
 {
+	--semaphore->waiters;
 	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
 }
 
 void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
 				struct radeon_semaphore *semaphore)
 {
+	++semaphore->waiters;
 	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
 }
@@ -200,29 +122,16 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 }
 
 void radeon_semaphore_free(struct radeon_device *rdev,
-			   struct radeon_semaphore *semaphore)
+			   struct radeon_semaphore *semaphore,
+			   struct radeon_fence *fence)
 {
-	unsigned long irq_flags;
-
-	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
-	semaphore->bo->nused--;
-	list_add_tail(&semaphore->list, &semaphore->bo->free);
-	radeon_semaphore_shrink_locked(rdev);
-	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-}
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev)
-{
-	struct radeon_semaphore_bo *bo, *n;
-	unsigned long irq_flags;
-
-	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
-	/* we force to free everything */
-	list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
-		if (!list_empty(&bo->free)) {
-			dev_err(rdev->dev, "still in use semaphore\n");
-		}
-		radeon_semaphore_del_bo_locked(rdev, bo);
+	if (semaphore == NULL) {
+		return;
+	}
+	if (semaphore->waiters > 0) {
+		dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+			" hardware lockup imminent!\n", semaphore);
 	}
-	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+	radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence);
+	kfree(semaphore);
 }
drivers/gpu/drm/radeon/radeon_test.c
@@ -317,7 +317,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
 out_cleanup:
 	if (semaphore)
-		radeon_semaphore_free(rdev, semaphore);
+		radeon_semaphore_free(rdev, semaphore, NULL);
 	if (fence1)
 		radeon_fence_unref(&fence1);
@@ -437,7 +437,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
 out_cleanup:
 	if (semaphore)
-		radeon_semaphore_free(rdev, semaphore);
+		radeon_semaphore_free(rdev, semaphore, NULL);
 	if (fenceA)
 		radeon_fence_unref(&fenceA);
drivers/gpu/drm/radeon/rv770.c
@@ -1278,7 +1278,6 @@ void rv770_fini(struct radeon_device *rdev)
 	rv770_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
drivers/gpu/drm/radeon/si.c
@@ -4109,7 +4109,6 @@ void si_fini(struct radeon_device *rdev)
 	si_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
-	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);