Commit 5bd24e78 authored by Thomas Hellström

drm/xe/vm: Subclass userptr vmas

The construct allocating only part of the vma structure when
the userptr part is not needed is very fragile. A developer could
add additional fields below the userptr part, and the code could
easily attempt to access the userptr part even if it's not present.

So introduce struct xe_userptr_vma, which subclasses struct xe_vma
the proper way, and modify a couple of interfaces accordingly.
This should also help when adding userptr helpers to drm_gpuvm.
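
A rough sketch of the pattern (illustrative names only, not the
actual xe structures): a wrapper struct embeds struct xe_vma, and
container_of() recovers the wrapper from the embedded vma:

    #include <linux/container_of.h>
    #include <linux/list.h>

    struct example_vma {
            struct list_head rebind;          /* state common to all vmas */
    };

    struct example_userptr_vma {
            struct example_vma vma;           /* embedded base object */
            struct list_head invalidate_link; /* userptr-only state */
    };

    /* Upcast from the embedded base to the userptr subclass. */
    static inline struct example_userptr_vma *
    to_example_userptr_vma(struct example_vma *vma)
    {
            return container_of(vma, struct example_userptr_vma, vma);
    }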

v2:
- Fix documentation of to_userptr_vma() (Matthew Brost)
- Fix allocation and freeing of vmas to more clearly distinguish
  between the types.

Closes: https://lore.kernel.org/intel-xe/0c4cc1a7-f409-4597-b110-81f9e45d1ffe@embeddedor.com/T/#u
Fixes: a4cc60a5 ("drm/xe: Only alloc userptr part of xe_vma for userptrs")
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240131091628.12318-1-thomas.hellstrom@linux.intel.com
parent 152ca51d
@@ -165,7 +165,8 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 		goto unlock_vm;
 	}
 
-	if (!xe_vma_is_userptr(vma) || !xe_vma_userptr_check_repin(vma)) {
+	if (!xe_vma_is_userptr(vma) ||
+	    !xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
 		downgrade_write(&vm->lock);
 		write_locked = false;
 	}
@@ -181,11 +182,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	/* TODO: Validate fault */
 
 	if (xe_vma_is_userptr(vma) && write_locked) {
+		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+
 		spin_lock(&vm->userptr.invalidated_lock);
-		list_del_init(&vma->userptr.invalidate_link);
+		list_del_init(&uvma->userptr.invalidate_link);
 		spin_unlock(&vm->userptr.invalidated_lock);
 
-		ret = xe_vma_userptr_pin_pages(vma);
+		ret = xe_vma_userptr_pin_pages(uvma);
 		if (ret)
 			goto unlock_vm;
@@ -220,7 +223,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	dma_fence_put(fence);
 
 	if (xe_vma_is_userptr(vma))
-		ret = xe_vma_userptr_check_repin(vma);
+		ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
 	vma->usm.tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
@@ -618,8 +618,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
 
 	if (!xe_vma_is_null(vma)) {
 		if (xe_vma_is_userptr(vma))
-			xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma),
-					&curs);
+			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
+					xe_vma_size(vma), &curs);
 		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
 			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
 				     xe_vma_size(vma), &curs);
@@ -906,17 +906,17 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
 
 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
 
-static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
-	u32 divisor = vma->userptr.divisor ? vma->userptr.divisor : 2;
+	u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
 	static u32 count;
 
 	if (count++ % divisor == divisor - 1) {
-		struct xe_vm *vm = xe_vma_vm(vma);
+		struct xe_vm *vm = xe_vma_vm(&uvma->vma);
 
-		vma->userptr.divisor = divisor << 1;
+		uvma->userptr.divisor = divisor << 1;
 		spin_lock(&vm->userptr.invalidated_lock);
-		list_move_tail(&vma->userptr.invalidate_link,
+		list_move_tail(&uvma->userptr.invalidate_link,
 			       &vm->userptr.invalidated);
 		spin_unlock(&vm->userptr.invalidated_lock);
 		return true;
@@ -927,7 +927,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
 
 #else
 
-static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
 	return false;
 }
@@ -1000,9 +1000,9 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
 	struct xe_pt_migrate_pt_update *userptr_update =
 		container_of(pt_update, typeof(*userptr_update), base);
-	struct xe_vma *vma = pt_update->vma;
-	unsigned long notifier_seq = vma->userptr.notifier_seq;
-	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
+	unsigned long notifier_seq = uvma->userptr.notifier_seq;
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
 	int err = xe_pt_vm_dependencies(pt_update->job,
 					&vm->rftree[pt_update->tile_id],
 					pt_update->start,
@@ -1023,7 +1023,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 	 */
 	do {
 		down_read(&vm->userptr.notifier_lock);
-		if (!mmu_interval_read_retry(&vma->userptr.notifier,
+		if (!mmu_interval_read_retry(&uvma->userptr.notifier,
 					     notifier_seq))
 			break;
@@ -1032,11 +1032,11 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 		if (userptr_update->bind)
 			return -EAGAIN;
 
-		notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
+		notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
 	} while (true);
 
 	/* Inject errors to test_whether they are handled correctly */
-	if (userptr_update->bind && xe_pt_userptr_inject_eagain(vma)) {
+	if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
 		up_read(&vm->userptr.notifier_lock);
 		return -EAGAIN;
 	}
@@ -1297,7 +1297,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
 	vma->tile_present |= BIT(tile->id);
 
 	if (bind_pt_update.locked) {
-		vma->userptr.initial_bind = true;
+		to_userptr_vma(vma)->userptr.initial_bind = true;
 		up_read(&vm->userptr.notifier_lock);
 		xe_bo_put_commit(&deferred);
 	}
@@ -1642,7 +1642,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
 		if (!vma->tile_present) {
 			spin_lock(&vm->userptr.invalidated_lock);
-			list_del_init(&vma->userptr.invalidate_link);
+			list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
 			spin_unlock(&vm->userptr.invalidated_lock);
 		}
 		up_read(&vm->userptr.notifier_lock);
@@ -160,6 +160,18 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
 	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
 }
 
+/**
+ * to_userptr_vma() - Return a pointer to an embedding userptr vma
+ * @vma: Pointer to the embedded struct xe_vma
+ *
+ * Return: Pointer to the embedding userptr vma
+ */
+static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
+{
+	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
+	return container_of(vma, struct xe_userptr_vma, vma);
+}
+
 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
 
 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -222,9 +234,9 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
 	}
 }
 
-int xe_vma_userptr_pin_pages(struct xe_vma *vma);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
 
-int xe_vma_userptr_check_repin(struct xe_vma *vma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
 
 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
@@ -37,6 +37,8 @@ struct xe_vm;
 struct xe_userptr {
 	/** @invalidate_link: Link for the vm::userptr.invalidated list */
 	struct list_head invalidate_link;
+	/** @userptr: link into VM repin list if userptr. */
+	struct list_head repin_link;
 	/**
 	 * @notifier: MMU notifier for user pointer (invalidation call back)
 	 */
@@ -68,8 +70,6 @@ struct xe_vma {
 	 * resv.
 	 */
 	union {
-		/** @userptr: link into VM repin list if userptr. */
-		struct list_head userptr;
 		/** @rebind: link into VM if this VMA needs rebinding. */
 		struct list_head rebind;
 		/** @destroy: link to contested list when VM is being closed. */
@@ -105,11 +105,15 @@ struct xe_vma {
 	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
 	 */
 	u16 pat_index;
+};
 
-	/**
-	 * @userptr: user pointer state, only allocated for VMAs that are
-	 * user pointers
-	 */
+/**
+ * struct xe_userptr_vma - A userptr vma subclass
+ * @vma: The vma.
+ * @userptr: Additional userptr information.
+ */
+struct xe_userptr_vma {
+	struct xe_vma vma;
 	struct xe_userptr userptr;
 };