Commit e176fe17 authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove mclk_lock

Not needed any more.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 8dacc127
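
For context, a minimal sketch of the locking pattern this patch removes (hypothetical helper functions, not the driver's actual code): memory-clock reprogramming took mclk_lock for writing, while buffer-object creation and the CPU page-fault path took it for reading, so a clock switch could not race with concurrent VRAM accesses.

/*
 * Sketch only (hypothetical helpers, not the driver's real functions):
 * the writer excludes all readers while the memory clock is switched;
 * readers may run concurrently with each other.
 */
#include <linux/rwsem.h>

static DECLARE_RWSEM(mclk_lock);

static void reprogram_mclk_sketch(void)
{
	down_write(&mclk_lock);		/* block BO creation and CPU faults */
	/* ... switch the memory clock ... */
	up_write(&mclk_lock);
}

static void access_vram_sketch(void)
{
	down_read(&mclk_lock);		/* many readers may hold this at once */
	/* ... allocate a BO or service a CPU page fault ... */
	up_read(&mclk_lock);
}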
@@ -1558,8 +1558,6 @@ struct amdgpu_dpm {
 struct amdgpu_pm {
 	struct mutex mutex;
-	/* write locked while reprogramming mclk */
-	struct rw_semaphore mclk_lock;
 	u32 current_sclk;
 	u32 current_mclk;
 	u32 default_sclk;
@@ -1401,7 +1401,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->gfx.gpu_clock_mutex);
 	mutex_init(&adev->srbm_mutex);
 	mutex_init(&adev->grbm_idx_mutex);
-	init_rwsem(&adev->pm.mclk_lock);
 	init_rwsem(&adev->exclusive_lock);
 	mutex_init(&adev->mn_lock);
 	hash_init(adev->mn_hash);
@@ -272,11 +272,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	bo->flags = flags;
 	amdgpu_fill_placement_to_bo(bo, placement);
 	/* Kernel allocation are uninterruptible */
-	down_read(&adev->pm.mclk_lock);
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
 			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
-	up_read(&adev->pm.mclk_lock);
 	if (unlikely(r != 0)) {
 		return r;
 	}
@@ -581,7 +581,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	}
 	mutex_lock(&adev->ddev->struct_mutex);
-	down_write(&adev->pm.mclk_lock);
 	mutex_lock(&adev->ring_lock);
 	/* update whether vce is active */
@@ -629,7 +628,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 done:
 	mutex_unlock(&adev->ring_lock);
-	up_write(&adev->pm.mclk_lock);
 	mutex_unlock(&adev->ddev->struct_mutex);
 }
@@ -966,52 +966,20 @@ void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
 	man->size = size >> PAGE_SHIFT;
 }
-static struct vm_operations_struct amdgpu_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
-static int amdgpu_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct ttm_buffer_object *bo;
-	struct amdgpu_device *adev;
-	int r;
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
-	if (bo == NULL) {
-		return VM_FAULT_NOPAGE;
-	}
-	adev = amdgpu_get_adev(bo->bdev);
-	down_read(&adev->pm.mclk_lock);
-	r = ttm_vm_ops->fault(vma, vmf);
-	up_read(&adev->pm.mclk_lock);
-	return r;
-}
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv;
 	struct amdgpu_device *adev;
-	int r;
-	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
+	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
 		return -EINVAL;
-	}
 	file_priv = filp->private_data;
 	adev = file_priv->minor->dev->dev_private;
-	if (adev == NULL) {
+	if (adev == NULL)
 		return -EINVAL;
-	}
-	r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
-	if (unlikely(r != 0)) {
-		return r;
-	}
-	if (unlikely(ttm_vm_ops == NULL)) {
-		ttm_vm_ops = vma->vm_ops;
-		amdgpu_ttm_vm_ops = *ttm_vm_ops;
-		amdgpu_ttm_vm_ops.fault = &amdgpu_ttm_fault;
-	}
-	vma->vm_ops = &amdgpu_ttm_vm_ops;
-	return 0;
+	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
 int amdgpu_copy_buffer(struct amdgpu_ring *ring,
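For reference, the post-patch amdgpu_mmap(), assembled from the context and added lines of the hunk above (a reconstruction from the diff, with indentation assumed), reduces to:

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	/* no custom fault handler is needed once mclk_lock is gone */
	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}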