Commit 0ff37575 authored by Thomas Hellström, committed by Matthew Auld

drm/i915: Update object placement flags to be mutable

The object-ops flag I915_GEM_OBJECT_HAS_IOMEM and the object flag
I915_BO_ALLOC_STRUCT_PAGE are considered immutable by
much of our code. Introduce a new mem_flags member to hold these
and make sure checks for these flags being set are either done
under the object lock or with pages properly pinned. The flags
will change during migration under the object lock.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210624084240.270219-2-thomas.hellstrom@linux.intel.com
parent 4bc2d574
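
For readers skimming the diff below: with this patch the struct-page/iomem information lives in the mutable obj->mem_flags, so the selftests now query it only while holding the object lock. A minimal sketch of that reader-side pattern, mirroring the can_access()/can_mmap() changes further down (the helper name object_is_mappable is illustrative, not part of the patch):

	/* Illustrative only: placement flags are stable only under the object
	 * lock or while the pages are pinned, so take the lock around the check.
	 */
	static bool object_is_mappable(struct drm_i915_gem_object *obj)
	{
		bool mappable;

		i915_gem_object_lock(obj, NULL);
		mappable = i915_gem_object_has_struct_page(obj) || /* system pages */
			   i915_gem_object_has_iomem(obj);         /* IO memory, e.g. lmem */
		i915_gem_object_unlock(obj);

		return mappable;
	}
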
@@ -177,8 +177,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class,
I915_BO_ALLOC_STRUCT_PAGE);
i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
/*
* Mark the object as volatile, such that the pages are marked as
......
@@ -71,6 +71,28 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
/**
* __i915_gem_object_is_lmem - Whether the object is resident in
* lmem while in the fence signaling critical path.
* @obj: The object to check.
*
* This function is intended to be called from within the fence signaling
* path where the fence keeps the object from being migrated. For example
* during gpu reset or similar.
*
* Return: Whether the object is resident in lmem.
*/
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
#ifdef CONFIG_LOCKDEP
GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true));
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
......
@@ -21,6 +21,8 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
......
@@ -684,7 +684,7 @@ __assign_mmap_offset(struct drm_i915_gem_object *obj,
if (mmap_type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
!i915_gem_object_has_iomem(obj))
return -ENODEV;
mmo = mmap_offset_attach(obj, mmap_type, file);
@@ -708,7 +708,12 @@ __assign_mmap_offset_handle(struct drm_file *file,
if (!obj)
return -ENOENT;
err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out_put;
err = __assign_mmap_offset(obj, mmap_type, offset, file);
i915_gem_object_unlock(obj);
out_put:
i915_gem_object_put(obj);
return err;
}
@@ -932,10 +937,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return PTR_ERR(anon);
}
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
if (i915_gem_object_has_iomem(obj))
vma->vm_flags |= VM_IO;
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
/*
* We keep the ref on mmo->obj, not vm_file, but we require
......
@@ -475,6 +475,44 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
return obj->mm.n_placements > 1;
}
/**
* i915_gem_object_has_struct_page - Whether the object is page-backed
* @obj: The object to query.
*
* This function should only be called while the object is locked or pinned,
* otherwise the page backing may change under the caller.
*
* Return: True if page-backed, false otherwise.
*/
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
if (IS_DGFX(to_i915(obj->base.dev)) &&
i915_gem_object_evictable((void __force *)obj))
assert_object_held_shared(obj);
#endif
return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}
/**
* i915_gem_object_has_iomem - Whether the object is iomem-backed
* @obj: The object to query.
*
* This function should only be called while the object is locked or pinned,
* otherwise the iomem backing may change under the caller.
*
* Return: True if iomem-backed, false otherwise.
*/
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
if (IS_DGFX(to_i915(obj->base.dev)) &&
i915_gem_object_evictable((void __force *)obj))
assert_object_held_shared(obj);
#endif
return obj->mem_flags & I915_BO_FLAG_IOMEM;
}
void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
......
@@ -148,7 +148,7 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
/*
* If more than one potential simultaneous locker, assert held.
*/
static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
/*
* Note mm list lookup is protected by
@@ -266,17 +266,9 @@ i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
return obj->ops->flags & flags;
}
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
}
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
}
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
......
@@ -33,10 +33,9 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
#define I915_GEM_OBJECT_IS_PROXY BIT(3)
#define I915_GEM_OBJECT_NO_MMAP BIT(4)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
#define I915_GEM_OBJECT_NO_MMAP BIT(3)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -201,17 +200,25 @@ struct drm_i915_gem_object {
unsigned long flags;
#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
#define I915_BO_ALLOC_VOLATILE BIT(1)
#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
#define I915_BO_ALLOC_CPU_CLEAR BIT(3)
#define I915_BO_ALLOC_USER BIT(4)
#define I915_BO_ALLOC_CPU_CLEAR BIT(2)
#define I915_BO_ALLOC_USER BIT(3)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_STRUCT_PAGE | \
I915_BO_ALLOC_CPU_CLEAR | \
I915_BO_ALLOC_USER)
#define I915_BO_READONLY BIT(5)
#define I915_TILING_QUIRK_BIT 6 /* unknown swizzling; do not release! */
#define I915_BO_READONLY BIT(4)
#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
/**
* @mem_flags - Mutable placement-related flags
*
* These are flags that indicate specifics of the memory region
* the object is currently in. As such they are only stable
* either under the object lock or if the object is pinned.
*/
unsigned int mem_flags;
#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
#define I915_BO_FLAG_IOMEM BIT(1) /* Object backed by IO memory */
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
......
@@ -351,7 +351,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
int err;
if (!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
!i915_gem_object_has_iomem(obj))
return ERR_PTR(-ENXIO);
assert_object_held(obj);
......
@@ -76,7 +76,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
/* We're no longer struct page backed */
obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;
obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
......
@@ -444,7 +444,7 @@ shmem_pread(struct drm_i915_gem_object *obj,
static void shmem_release(struct drm_i915_gem_object *obj)
{
if (obj->flags & I915_BO_ALLOC_STRUCT_PAGE)
if (i915_gem_object_has_struct_page(obj))
i915_gem_object_release_memory_region(obj);
fput(obj->base.filp);
@@ -513,9 +513,8 @@ static int shmem_object_init(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class,
I915_BO_ALLOC_STRUCT_PAGE);
i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
......
@@ -732,7 +732,6 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.name = "i915_gem_object_ttm",
.flags = I915_GEM_OBJECT_HAS_IOMEM,
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
@@ -777,6 +776,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
i915_gem_object_init_memory_region(obj, mem);
i915_gem_object_make_unshrinkable(obj);
obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
obj->mem_flags |= I915_BO_FLAG_IOMEM;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->ttm.get_io_page.lock);
......
@@ -510,8 +510,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
I915_BO_ALLOC_STRUCT_PAGE);
i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
......
@@ -114,8 +114,8 @@ huge_gem_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
i915_gem_object_init(obj, &huge_ops, &lock_class,
I915_BO_ALLOC_STRUCT_PAGE);
i915_gem_object_init(obj, &huge_ops, &lock_class, 0);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
......
@@ -167,9 +167,8 @@ huge_pages_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &huge_page_ops, &lock_class,
I915_BO_ALLOC_STRUCT_PAGE);
i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
......
@@ -831,16 +831,19 @@ static int wc_check(struct drm_i915_gem_object *obj)
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
bool no_map;
if (type == I915_MMAP_TYPE_GTT &&
!i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
return false;
if (type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
return false;
i915_gem_object_lock(obj, NULL);
no_map = (type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_has_iomem(obj));
i915_gem_object_unlock(obj);
return true;
return !no_map;
}
static void object_set_placements(struct drm_i915_gem_object *obj,
@@ -988,10 +991,16 @@ static const char *repr_mmap_type(enum i915_mmap_type type)
}
}
static bool can_access(const struct drm_i915_gem_object *obj)
static bool can_access(struct drm_i915_gem_object *obj)
{
return i915_gem_object_has_struct_page(obj) ||
i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
bool access;
i915_gem_object_lock(obj, NULL);
access = i915_gem_object_has_struct_page(obj) ||
i915_gem_object_has_iomem(obj);
i915_gem_object_unlock(obj);
return access;
}
static int __igt_mmap_access(struct drm_i915_private *i915,
......
@@ -25,13 +25,14 @@ static int mock_phys_object(void *arg)
goto out;
}
i915_gem_object_lock(obj, NULL);
if (!i915_gem_object_has_struct_page(obj)) {
i915_gem_object_unlock(obj);
err = -EINVAL;
pr_err("shmem has no struct page\n");
goto out_obj;
}
i915_gem_object_lock(obj, NULL);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
i915_gem_object_unlock(obj);
if (err) {
......
@@ -1039,7 +1039,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
if (ret)
break;
}
} else if (i915_gem_object_is_lmem(vma->obj)) {
} else if (__i915_gem_object_is_lmem(vma->obj)) {
struct intel_memory_region *mem = vma->obj->mm.region;
dma_addr_t dma;
......
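
To connect the pieces above: the struct-page/iomem information moves from immutable ops and allocation flags into obj->mem_flags, which a backend may rewrite while it holds the object lock (as i915_gem_object_get_pages_phys() does when the object stops being struct-page backed, and as later migration work is expected to do). A rough writer-side sketch under those assumptions; the function name and the to_lmem parameter are purely illustrative:

	/* Illustrative only: caller must hold the object lock so that the locked
	 * readers (i915_gem_object_has_struct_page() / i915_gem_object_has_iomem())
	 * observe a consistent value.
	 */
	static void update_placement_flags(struct drm_i915_gem_object *obj, bool to_lmem)
	{
		assert_object_held(obj);

		obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
		obj->mem_flags |= to_lmem ? I915_BO_FLAG_IOMEM : I915_BO_FLAG_STRUCT_PAGE;
	}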