Commit 93735059 authored by Matthew Auld

drm/i915/ttm: make eviction mappable aware

If we need to make room for some mappable object, then we should
only victimize objects that have one or more pages occupying the visible
portion of LMEM. Let's also create a new priority hint for objects that
are placed in mappable memory, where we know that CPU access was
requested, so that we hopefully victimize these last.

v2(Thomas): s/TTM_PL_PRIV/I915_PL_LMEM0/
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220228123607.580432-1-matthew.auld@intel.com
parent c03d9826
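
For illustration only (not part of the patch): the range-restricted eviction
check described above boils down to a half-open interval overlap test between
the requested [fpfn, lpfn) window and each block backing the victim. A minimal
standalone C sketch, with made-up types and values, could look like this:

/* Standalone sketch of the overlap test; types and values are illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct block_range { unsigned long fpfn, lpfn; }; /* [fpfn, lpfn) in pages */

/*
 * A victim is only useful for a range-restricted request if at least one of
 * its blocks overlaps the requested [fpfn, lpfn) window.
 */
static bool victim_overlaps(const struct block_range *blocks, int nr,
			    unsigned long fpfn, unsigned long lpfn)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (fpfn < blocks[i].lpfn && lpfn > blocks[i].fpfn)
			return true;
	}
	return false;
}

int main(void)
{
	/* victim occupying pages [0, 256) and [1024, 1280) */
	const struct block_range victim[] = { { 0, 256 }, { 1024, 1280 } };

	/* request limited to [512, 768): no overlap, not worth evicting */
	printf("%d\n", victim_overlaps(victim, 2, 512, 768)); /* prints 0 */
	/* request limited to [128, 512): overlaps the first block */
	printf("%d\n", victim_overlaps(victim, 2, 128, 512)); /* prints 1 */
	return 0;
}
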
@@ -7,8 +7,10 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_buddy.h>
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -22,6 +24,7 @@
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
#define I915_TTM_PRIO_HAS_PAGES 2
#define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3
/*
* Size of struct ttm_place vector in on-stack struct ttm_placement allocs
@@ -339,6 +342,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct ttm_resource *res = bo->resource;
if (!obj)
return false;
@@ -352,7 +356,48 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
return false;
/* Will do for now. Our pinned objects are still on TTM's LRU lists */
return i915_gem_object_evictable(obj);
if (!i915_gem_object_evictable(obj))
return false;
switch (res->mem_type) {
case I915_PL_LMEM0: {
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, res->mem_type);
struct i915_ttm_buddy_resource *bman_res =
to_ttm_buddy_resource(res);
struct drm_buddy *mm = bman_res->mm;
struct drm_buddy_block *block;
if (!place->fpfn && !place->lpfn)
return true;
GEM_BUG_ON(!place->lpfn);
/*
* If we just want something mappable then we can quickly check
* if the current victim resource is using any of the CPU
* visible portion.
*/
if (!place->fpfn &&
place->lpfn == i915_ttm_buddy_man_visible_size(man))
return bman_res->used_visible_size > 0;
/* Real range allocation */
list_for_each_entry(block, &bman_res->blocks, link) {
unsigned long fpfn =
drm_buddy_block_offset(block) >> PAGE_SHIFT;
unsigned long lpfn = fpfn +
(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
if (place->fpfn < lpfn && place->lpfn > fpfn)
return true;
}
return false;
} default:
break;
}
return true;
}
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@ -852,7 +897,23 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (!i915_gem_object_has_pages(obj)) {
bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
bo->priority = I915_TTM_PRIO_HAS_PAGES;
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, bo->resource->mem_type);
/*
* If we need to place an LMEM resource which doesn't need CPU
* access then we should try not to victimize mappable objects
* first, since we likely end up stealing more of the mappable
portion. And likewise when we try to find space for a mappable
* object, we know not to ever victimize objects that don't
* occupy any mappable pages.
*/
if (i915_ttm_cpu_maps_iomem(bo->resource) &&
i915_ttm_buddy_man_visible_size(man) < man->size &&
!(obj->flags & I915_BO_ALLOC_GPU_ONLY))
bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS;
else
bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
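
Again for illustration only: the priority selection in the
i915_ttm_adjust_lru() hunk above can be reduced to the following plain-C
sketch. The PRIO_* values mirror the I915_TTM_PRIO_* defines from the earlier
hunk; the helper and its parameters are invented for the example.

/* Sketch of the LRU priority choice; everything here is illustrative. */
#include <stdbool.h>

enum {
	PRIO_PURGE = 0,            /* purgeable: cheapest to evict */
	PRIO_NO_PAGES = 1,         /* no backing pages yet */
	PRIO_HAS_PAGES = 2,        /* has pages, no CPU-access requirement */
	PRIO_NEEDS_CPU_ACCESS = 3, /* mappable and CPU access requested: evict last */
};

static int lru_priority(bool purgeable, bool has_pages, bool maps_iomem,
			bool small_bar, bool gpu_only)
{
	if (purgeable)
		return PRIO_PURGE;
	if (!has_pages)
		return PRIO_NO_PAGES;
	/*
	 * Only bump objects that actually occupy the CPU-visible window on a
	 * small-BAR system and were not allocated GPU-only, matching the
	 * intent of the hunk above.
	 */
	if (maps_iomem && small_bar && !gpu_only)
		return PRIO_NEEDS_CPU_ACCESS;
	return PRIO_HAS_PAGES;
}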