Commit dee53e7f authored by Jerome Glisse, committed by Christian König

drm/radeon: add an exclusive lock for GPU reset v2

GPU reset needs to be exclusive, with only one happening at a time. For
this, add an rw semaphore so that any path that triggers GPU activity
has to take the semaphore as a reader, which allows such paths to run
concurrently.

The GPU reset path takes the semaphore as a writer, ensuring that no
concurrent reset takes place.
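
As a rough sketch of the pattern (illustration only, not part of the
patch; radeon_submit_work and radeon_do_gpu_reset are hypothetical
stand-ins for the driver's real entry points):

	#include <linux/rwsem.h>

	/* Submission paths take the lock as readers; any number of
	 * them can run concurrently. */
	static int radeon_submit_work(struct radeon_device *rdev)
	{
		down_read(&rdev->exclusive_lock);
		/* ... queue GPU work; a reset cannot start while we
		 * hold the lock as a reader ... */
		up_read(&rdev->exclusive_lock);
		return 0;
	}

	/* The reset path takes the lock as a writer, waiting for all
	 * readers to drain and blocking new ones until it is done. */
	static int radeon_do_gpu_reset(struct radeon_device *rdev)
	{
		down_write(&rdev->exclusive_lock);
		/* ... perform the actual reset ... */
		up_write(&rdev->exclusive_lock);
		return 0;
	}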

v2: init rw semaphore
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 93bf888c
drivers/gpu/drm/radeon/radeon.h
@@ -1446,6 +1446,7 @@ struct radeon_device {
 	struct device *dev;
 	struct drm_device *ddev;
 	struct pci_dev *pdev;
+	struct rw_semaphore exclusive_lock;
 	/* ASIC */
 	union radeon_asic_config config;
 	enum radeon_family family;
drivers/gpu/drm/radeon/radeon_cs.c
@@ -499,7 +499,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	struct radeon_cs_parser parser;
 	int r;

+	down_read(&rdev->exclusive_lock);
 	if (!rdev->accel_working) {
+		up_read(&rdev->exclusive_lock);
 		return -EBUSY;
 	}
 	/* initialize parser */
@@ -512,6 +514,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
 		radeon_cs_parser_fini(&parser, r);
+		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
 	}
@@ -520,6 +523,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
 		radeon_cs_parser_fini(&parser, r);
+		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
 	}
@@ -533,6 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	}
 out:
 	radeon_cs_parser_fini(&parser, r);
+	up_read(&rdev->exclusive_lock);
 	r = radeon_cs_handle_lockup(rdev, r);
 	return r;
 }
drivers/gpu/drm/radeon/radeon_device.c
@@ -734,6 +734,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	mutex_init(&rdev->gem.mutex);
 	mutex_init(&rdev->pm.mutex);
 	init_rwsem(&rdev->pm.mclk_lock);
+	init_rwsem(&rdev->exclusive_lock);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
 	init_waitqueue_head(&rdev->irq.idle_queue);
 	r = radeon_gem_init(rdev);
@@ -988,6 +989,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	int r;
 	int resched;

+	down_write(&rdev->exclusive_lock);
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1007,6 +1009,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		dev_info(rdev->dev, "GPU reset failed\n");
 	}

+	up_write(&rdev->exclusive_lock);
 	return r;
 }
drivers/gpu/drm/radeon/radeon_gem.c
@@ -215,12 +215,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 	uint32_t handle;
 	int r;

+	down_read(&rdev->exclusive_lock);
 	/* create a gem object to contain this object in */
 	args->size = roundup(args->size, PAGE_SIZE);
 	r = radeon_gem_object_create(rdev, args->size, args->alignment,
					args->initial_domain, false,
					false, &gobj);
 	if (r) {
+		up_read(&rdev->exclusive_lock);
 		r = radeon_gem_handle_lockup(rdev, r);
 		return r;
 	}
@@ -228,10 +230,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(gobj);
 	if (r) {
+		up_read(&rdev->exclusive_lock);
 		r = radeon_gem_handle_lockup(rdev, r);
 		return r;
 	}
 	args->handle = handle;
+	up_read(&rdev->exclusive_lock);
 	return 0;
 }
@@ -240,6 +244,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 {
 	/* transition the BO to a domain -
 	 * just validate the BO into a certain domain */
+	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_set_domain *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -247,10 +252,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	/* for now if someone requests domain CPU -
 	 * just make sure the buffer is finished with */
+	down_read(&rdev->exclusive_lock);
 	/* just do a BO wait for now */
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL) {
+		up_read(&rdev->exclusive_lock);
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
@@ -258,6 +265,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 	drm_gem_object_unreference_unlocked(gobj);
+	up_read(&rdev->exclusive_lock);
 	r = radeon_gem_handle_lockup(robj->rdev, r);
 	return r;
 }