Commit 32b7cf51 authored by Thomas Hellström, committed by Matthew Auld

drm/i915/ttm: Use TTM for system memory

For discrete, use TTM for both cached and WC system memory. That means
we currently rely on the TTM memory accounting / shrinker. For cached
system memory we should consider remaining shmem-backed, which can be
implemented from our ttm_tt_populate callback. We can then also reuse our
own very elaborate shrinker for that memory.
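
As a rough sketch of that direction (not part of this patch), the populate hook could route cached allocations to shmem while WC/UC allocations keep using the TTM page pool. This assumes the struct ttm_device_funcs::ttm_tt_populate signature and ttm_pool_alloc() from this era of TTM; i915_ttm_tt_shmem_populate() is a hypothetical helper:

  /* Hypothetical sketch, not from this patch. */
  static int i915_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
  {
	/*
	 * Cached pages could come from a shmem file, so that the core
	 * shmem shrinker (and our own shrinker logic) can reclaim them.
	 */
	if (ttm->caching == ttm_cached)
		return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);

	/* WC/UC memory keeps using the TTM page pool. */
	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
  }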

If an object is evicted to a gem-allowable region, we now consider the
object migrated: we flip the gem region and move the object to a
different region list. Since gem regions can now change, we can no
longer rely on the CONTIGUOUS flag being set based on the region
min page size, so remove that flag update. If we want to reintroduce it,
we need to put it in the mutable flags.
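
For reference, this is the check being dropped. obj->flags is computed once at object creation, but with migration obj->mm.region can change afterwards, so a flag derived from the initial region's min_page_size could go stale:

  /* Removed by this patch: only valid while the gem region is fixed. */
  if (obj->base.size <= mem->min_page_size)
	obj->flags |= I915_BO_ALLOC_CONTIGUOUS;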
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210624084240.270219-4-thomas.hellstrom@linux.intel.com
parent 3c2b8f32
drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -13,11 +13,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
 {
 	obj->mm.region = intel_memory_region_get(mem);
 
-	if (obj->base.size <= mem->min_page_size)
-		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
-
 	mutex_lock(&mem->objects.lock);
 	list_add(&obj->mm.region_link, &mem->objects.list);
 	mutex_unlock(&mem->objects.lock);
 }
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -302,6 +302,7 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_
 	struct pagevec pvec;
 	struct page *page;
 
+	GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
 	__i915_gem_object_release_shmem(obj, pages, true);
 
 	i915_gem_gtt_finish_pages(obj, pages);
@@ -560,6 +561,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
 	resource_size_t offset;
 	int err;
 
+	GEM_WARN_ON(IS_DGFX(dev_priv));
 	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
 	if (IS_ERR(obj))
 		return obj;
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -286,6 +286,26 @@ static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 {
 	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 	unsigned int cache_level;
+	unsigned int i;
+
+	/*
+	 * If object was moved to an allowable region, update the object
+	 * region to consider it migrated. Note that if it's currently not
+	 * in an allowable region, it's evicted and we don't update the
+	 * object region.
+	 */
+	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+		for (i = 0; i < obj->mm.n_placements; ++i) {
+			struct intel_memory_region *mr = obj->mm.placements[i];
+
+			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+			    mr != obj->mm.region) {
+				i915_gem_object_release_memory_region(obj);
+				i915_gem_object_init_memory_region(obj, mr);
+				break;
+			}
+		}
+	}
 
 	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
@@ -615,13 +635,6 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
 
 	/* Move to the requested placement. */
 	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
 
-	/*
-	 * For now we support LMEM only with TTM.
-	 * TODO: Remove with system support
-	 */
-	GEM_BUG_ON(requested.mem_type < I915_PL_LMEM0 ||
-		   busy[0].mem_type < I915_PL_LMEM0);
-
 	/* First try only the requested placement. No eviction. */
 	real_num_busy = fetch_and_zero(&placement.num_busy_placement);
 	ret = ttm_bo_validate(bo, &placement, &ctx);
@@ -635,9 +648,6 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
 		    ret == -EAGAIN)
 			return ret;
 
-		/* TODO: Remove this when we support system as TTM. */
-		real_num_busy = 1;
-
 		/*
 		 * If the initial attempt fails, allow all accepted placements,
 		 * evicting if necessary.
@@ -873,3 +883,25 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 
 	return 0;
 }
+
+static const struct intel_memory_region_ops ttm_system_region_ops = {
+	.init_object = __i915_gem_ttm_object_init,
+};
+
+struct intel_memory_region *
+i915_gem_ttm_system_setup(struct drm_i915_private *i915,
+			  u16 type, u16 instance)
+{
+	struct intel_memory_region *mr;
+
+	mr = intel_memory_region_create(i915, 0,
+					totalram_pages() << PAGE_SHIFT,
+					PAGE_SIZE, 0,
+					type, instance,
+					&ttm_system_region_ops);
+	if (IS_ERR(mr))
+		return mr;
+
+	intel_memory_region_set_name(mr, "system-ttm");
+	return mr;
+}
drivers/gpu/drm/i915/i915_drv.h
@@ -1751,9 +1751,6 @@ void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
 
-struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
-						 u16 type, u16 instance);
-
 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 {
 	/*
drivers/gpu/drm/i915/intel_memory_region.c
@@ -173,7 +173,12 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
 		instance = intel_region_map[i].instance;
 		switch (type) {
 		case INTEL_MEMORY_SYSTEM:
-			mem = i915_gem_shmem_setup(i915, type, instance);
+			if (IS_DGFX(i915))
+				mem = i915_gem_ttm_system_setup(i915, type,
+								instance);
+			else
+				mem = i915_gem_shmem_setup(i915, type,
+							   instance);
 			break;
 		case INTEL_MEMORY_STOLEN_LOCAL:
 			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
drivers/gpu/drm/i915/intel_memory_region.h
@@ -125,4 +125,12 @@ intel_memory_region_set_name(struct intel_memory_region *mem,
 int intel_memory_region_reserve(struct intel_memory_region *mem,
 				resource_size_t offset,
 				resource_size_t size);
+
+struct intel_memory_region *
+i915_gem_ttm_system_setup(struct drm_i915_private *i915,
+			  u16 type, u16 instance);
+
+struct intel_memory_region *
+i915_gem_shmem_setup(struct drm_i915_private *i915,
+		     u16 type, u16 instance);
+
 #endif
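
Taken together, on discrete a buffer allocated from the system region now takes the TTM path, since the region's init_object points at __i915_gem_ttm_object_init. An illustrative sketch of a driver-internal allocation (not from this patch; the i915_gem_object_create_region() signature reflects the i915 code around this series):

  struct intel_memory_region *mr = i915->mm.regions[INTEL_REGION_SMEM];
  struct drm_i915_gem_object *obj;

  obj = i915_gem_object_create_region(mr, SZ_2M, 0);
  if (IS_ERR(obj))
	return PTR_ERR(obj);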