Commit a4cc60a5 authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Only alloc userptr part of xe_vma for userptrs

Only allocate the userptr part of xe_vma for userptrs; this saves space
in the common BO case.
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent eae553cb
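
The saving comes from making the userptr state the trailing member of struct xe_vma and trimming it off the allocation whenever the VMA is backed by a BO or is a NULL mapping. A minimal userspace sketch of the same trick, using hypothetical fake_vma/fake_userptr names rather than the driver's real structures:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver structures (not the real layout). */
struct fake_userptr {
        void *notifier_state[4];        /* pretend MMU-notifier bookkeeping */
        unsigned long seq;
};

struct fake_vma {
        unsigned long start, end;
        void *bo;                       /* backing object, NULL for a userptr VMA */
        struct fake_userptr userptr;    /* must stay the LAST member */
};

/* Allocate the full struct only when the VMA is a userptr. */
static struct fake_vma *fake_vma_create(void *bo)
{
        size_t sz = bo ? sizeof(struct fake_vma) - sizeof(struct fake_userptr)
                       : sizeof(struct fake_vma);

        return calloc(1, sz);           /* BO case never touches ->userptr */
}

int main(void)
{
        printf("userptr vma: %zu bytes, bo-backed vma: %zu bytes\n",
               sizeof(struct fake_vma),
               sizeof(struct fake_vma) - sizeof(struct fake_userptr));

        free(fake_vma_create(NULL));            /* userptr: full size */
        free(fake_vma_create((void *)1));       /* bo-backed: trimmed */
        return 0;
}

The BO-backed allocation is smaller precisely because nothing ever dereferences ->userptr on it; the diff below enforces the same split by only initializing userptr state on the userptr path.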
@@ -880,14 +880,17 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
         XE_BUG_ON(start >= end);
         XE_BUG_ON(end >= vm->size);
-        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+        if (!bo && !is_null)    /* userptr */
+                vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+        else
+                vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
+                              GFP_KERNEL);
         if (!vma) {
                 vma = ERR_PTR(-ENOMEM);
                 return vma;
         }
         INIT_LIST_HEAD(&vma->combined_links.rebind);
-        INIT_LIST_HEAD(&vma->userptr.invalidate_link);
         INIT_LIST_HEAD(&vma->notifier.rebind_link);
         INIT_LIST_HEAD(&vma->extobj.link);
@@ -931,6 +934,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
                 u64 size = end - start + 1;
                 int err;
+                INIT_LIST_HEAD(&vma->userptr.invalidate_link);
                 vma->gpuva.gem.offset = bo_offset_or_userptr;
                 err = mmu_interval_notifier_insert(&vma->userptr.notifier,
......
@@ -34,6 +34,31 @@ struct xe_vm;
 #define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
 #define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
+/** struct xe_userptr - User pointer */
+struct xe_userptr {
+        /** @invalidate_link: Link for the vm::userptr.invalidated list */
+        struct list_head invalidate_link;
+        /**
+         * @notifier: MMU notifier for user pointer (invalidation call back)
+         */
+        struct mmu_interval_notifier notifier;
+        /** @sgt: storage for a scatter gather table */
+        struct sg_table sgt;
+        /** @sg: allocated scatter gather table */
+        struct sg_table *sg;
+        /** @notifier_seq: notifier sequence number */
+        unsigned long notifier_seq;
+        /**
+         * @initial_bind: user pointer has been bound at least once.
+         * write: vm->userptr.notifier_lock in read mode and vm->resv held.
+         * read: vm->userptr.notifier_lock in write mode or vm->resv held.
+         */
+        bool initial_bind;
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+        u32 divisor;
+#endif
+};
 struct xe_vma {
         /** @gpuva: Base GPUVA object */
         struct drm_gpuva gpuva;
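
The @initial_bind documentation above encodes an asymmetric locking rule: writers take vm->userptr.notifier_lock in read mode plus the VM's reservation lock, while readers need either notifier_lock in write mode or the reservation lock. A hedged sketch of a writer under those rules (hypothetical helper, not part of this patch; assumes notifier_lock is an rwsem and xe_vm_assert_held() checks the VM's dma_resv):

/* Hypothetical helper, not in this commit: set @initial_bind following the
 * rule documented above (notifier_lock held in read mode AND vm->resv held).
 */
static void xe_vma_userptr_mark_bound(struct xe_vm *vm, struct xe_vma *vma)
{
        lockdep_assert_held_read(&vm->userptr.notifier_lock);
        xe_vm_assert_held(vm);  /* vm->resv */

        vma->userptr.initial_bind = true;
}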
@@ -68,31 +93,6 @@ struct xe_vma {
                 struct work_struct destroy_work;
         };
-        /** @userptr: user pointer state */
-        struct {
-                /** @invalidate_link: Link for the vm::userptr.invalidated list */
-                struct list_head invalidate_link;
-                /**
-                 * @notifier: MMU notifier for user pointer (invalidation call back)
-                 */
-                struct mmu_interval_notifier notifier;
-                /** @sgt: storage for a scatter gather table */
-                struct sg_table sgt;
-                /** @sg: allocated scatter gather table */
-                struct sg_table *sg;
-                /** @notifier_seq: notifier sequence number */
-                unsigned long notifier_seq;
-                /**
-                 * @initial_bind: user pointer has been bound at least once.
-                 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
-                 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
-                 */
-                bool initial_bind;
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
-                u32 divisor;
-#endif
-        } userptr;
         /** @usm: unified shared memory state */
         struct {
                 /** @tile_invalidated: VMA has been invalidated */
@@ -122,6 +122,12 @@ struct xe_vma {
                  */
                 struct list_head link;
         } extobj;
+        /**
+         * @userptr: user pointer state, only allocated for VMAs that are
+         * user pointers
+         */
+        struct xe_userptr userptr;
 };
 struct xe_device;
......
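
One consequence of the new layout: the shortened kzalloc() in xe_vma_create() is only correct while @userptr stays the last member of struct xe_vma. The patch itself adds no guard, but a compile-time check along these lines could catch accidental reordering (a sketch, not part of this commit, assuming no trailing padding after @userptr):

/* Hypothetical guard: the BO-case allocation of
 * sizeof(*vma) - sizeof(struct xe_userptr) relies on @userptr being last.
 * Assumes no trailing padding follows @userptr.
 */
static_assert(offsetof(struct xe_vma, userptr) + sizeof(struct xe_userptr) ==
              sizeof(struct xe_vma));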