Commit d1ef9671 authored by Matthew Brost, committed by Lucas De Marchi

drm/xe: Convert USM lock to rwsem

Remove contention from GPU fault path for ASID->VM lookup.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240918054436.1971839-1-matthew.brost@intel.com
(cherry picked from commit 1378c633a3fbfeb344c486ffda0e920a21e62712)
Signed-off-by: default avatarLucas De Marchi <lucas.demarchi@intel.com>
parent cb589770
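
The rationale in one picture: ASID->VM lookups run on every GPU page fault and only read the table, while VM create/destroy mutate it rarely, so a reader/writer lock lets concurrent faults proceed in parallel. Below is a minimal userspace sketch of that pattern, using POSIX pthread_rwlock_t as a stand-in for the kernel's struct rw_semaphore; the fixed-size table and the lookup_vm()/install_vm() helpers are illustrative, not the driver's API.

/* Userspace analogue of this patch's locking: pthread_rwlock_t in
 * place of struct rw_semaphore. Build with: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t usm_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *asid_to_vm[16];	/* stand-in for the xarray */

/* Fault path: many threads may look up concurrently (down_read()). */
static void *lookup_vm(unsigned int asid)
{
	void *vm;

	pthread_rwlock_rdlock(&usm_lock);
	vm = asid_to_vm[asid % 16];
	pthread_rwlock_unlock(&usm_lock);
	return vm;
}

/* VM create/destroy: table updates stay exclusive (down_write()). */
static void install_vm(unsigned int asid, void *vm)
{
	pthread_rwlock_wrlock(&usm_lock);
	asid_to_vm[asid % 16] = vm;
	pthread_rwlock_unlock(&usm_lock);
}

int main(void)
{
	int dummy;

	install_vm(1, &dummy);
	printf("vm=%p\n", lookup_vm(1));
	return 0;
}

With a mutex, every concurrent page fault serialized on the same lock just to read the table; with the rwsem, only the rare write paths exclude each other.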
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -339,9 +339,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	init_waitqueue_head(&xe->ufence_wq);
 
-	err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
-	if (err)
-		goto err;
+	init_rwsem(&xe->usm.lock);
 
 	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -369,7 +369,7 @@ struct xe_device {
 		/** @usm.next_asid: next ASID, used to cyclical alloc asids */
 		u32 next_asid;
 		/** @usm.lock: protects UM state */
-		struct mutex lock;
+		struct rw_semaphore lock;
 	} usm;
 
 	/** @pinned: pinned BO state */
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -198,13 +198,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 		return -EFAULT;
 
 	/* ASID to VM */
-	mutex_lock(&xe->usm.lock);
+	down_read(&xe->usm.lock);
 	vm = xa_load(&xe->usm.asid_to_vm, pf->asid);
 	if (vm && xe_vm_in_fault_mode(vm))
 		xe_vm_get(vm);
 	else
 		vm = NULL;
-	mutex_unlock(&xe->usm.lock);
+	up_read(&xe->usm.lock);
 	if (!vm)
 		return -EINVAL;
@@ -549,11 +549,11 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
 		return -EINVAL;
 
 	/* ASID to VM */
-	mutex_lock(&xe->usm.lock);
+	down_read(&xe->usm.lock);
 	vm = xa_load(&xe->usm.asid_to_vm, acc->asid);
 	if (vm)
 		xe_vm_get(vm);
-	mutex_unlock(&xe->usm.lock);
+	up_read(&xe->usm.lock);
 	if (!vm || !xe_vm_in_fault_mode(vm))
 		return -EINVAL;
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1613,7 +1613,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 
 	up_write(&vm->lock);
 
-	mutex_lock(&xe->usm.lock);
+	down_write(&xe->usm.lock);
 	if (vm->usm.asid) {
 		void *lookup;
@@ -1623,7 +1623,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
 		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
 		xe_assert(xe, lookup == vm);
 	}
-	mutex_unlock(&xe->usm.lock);
+	up_write(&xe->usm.lock);
 
 	for_each_tile(tile, xe, id)
 		xe_range_fence_tree_fini(&vm->rftree[id]);
@@ -1772,11 +1772,11 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 		goto err_close_and_put;
 
 	if (xe->info.has_asid) {
-		mutex_lock(&xe->usm.lock);
+		down_write(&xe->usm.lock);
 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
 				      XA_LIMIT(1, XE_MAX_ASID - 1),
 				      &xe->usm.next_asid, GFP_KERNEL);
-		mutex_unlock(&xe->usm.lock);
+		up_write(&xe->usm.lock);
 		if (err < 0)
 			goto err_free_id;
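
One detail worth noting in the fault-path hunks: xe_vm_get() stays inside the read-locked section. The write side may xa_erase() the entry and drop the table's reference as soon as the lock is released, so a lookup must take its own reference before unlocking. A hedged sketch of that invariant, again as a userspace pthread analogue with a hypothetical atomic refcount rather than the driver's actual reference machinery:

/* Reference-under-lock sketch; entry and the helpers are illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct vm {
	atomic_int refcount;	/* stand-in, not xe's real refcounting */
};

static pthread_rwlock_t usm_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct vm *entry;	/* stand-in for one xarray slot */

/* Reader: take a reference *before* dropping the lock (xe_vm_get()). */
static struct vm *lookup_and_get(void)
{
	struct vm *vm;

	pthread_rwlock_rdlock(&usm_lock);
	vm = entry;
	if (vm)
		atomic_fetch_add(&vm->refcount, 1);
	pthread_rwlock_unlock(&usm_lock);
	return vm;	/* safe to use even if the entry is erased now */
}

/* Writer: erase under the exclusive lock, as xe_vm_close_and_put() does. */
static struct vm *erase_entry(void)
{
	struct vm *vm;

	pthread_rwlock_wrlock(&usm_lock);
	vm = entry;
	entry = NULL;
	pthread_rwlock_unlock(&usm_lock);
	return vm;
}

int main(void)
{
	static struct vm the_vm = { .refcount = 1 };

	entry = &the_vm;
	lookup_and_get();	/* refcount now 2 */
	erase_entry();		/* lookup's reference keeps the_vm valid */
	return 0;
}

If the reference were taken after up_read(), a concurrent xe_vm_close_and_put() could free the VM in the window between unlock and get; holding the read lock across both the xa_load() and the get closes that race.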