Commit ecbf2060 authored by Matthew Auld

drm/i915/ttm: wire up the object offset

For the ttm backend we can use the existing placement fields fpfn and lpfn to
force the allocator to place the object at the requested offset, potentially
evicting whatever currently occupies that range.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220315181425.576828-5-matthew.auld@intel.com
parent 9b78b5da
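
As background for the diff below, here is a minimal standalone sketch (not part of the patch) of the fpfn/lpfn arithmetic the commit message describes: a requested byte offset and the object size are converted into a page-frame window that leaves the allocator no choice about where the object lands. The demo_place struct, the demo_place_from_offset() helper and the PAGE_SHIFT value of 12 are illustrative assumptions only; in the driver the same two lines operate on a real struct ttm_place.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for this example */

struct demo_place {
	uint32_t fpfn;	/* first allowed page frame number */
	uint32_t lpfn;	/* upper bound of the allowed page range */
};

/* Same arithmetic as the i915_ttm_place_from_region() change below. */
static void demo_place_from_offset(struct demo_place *place,
				   uint64_t offset, uint64_t size)
{
	place->fpfn = offset >> PAGE_SHIFT;
	place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
}

int main(void)
{
	struct demo_place place;

	/* A 1 MiB object that must live at byte offset 16 MiB. */
	demo_place_from_offset(&place, 16ull << 20, 1ull << 20);
	printf("fpfn=%u lpfn=%u\n", place.fpfn, place.lpfn);	/* 4096 4352 */
	return 0;
}

Because the window is exactly as wide as the allocation, the only way to satisfy the placement is to put the object at the requested offset; that is also why the buddy-manager hunk below can skip its contiguous power-of-two rounding when fpfn + num_pages == lpfn.
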
@@ -631,6 +631,8 @@ struct drm_i915_gem_object {
 		struct drm_mm_node *stolen;
+		resource_size_t bo_offset;
 		unsigned long scratch;
 		u64 encode;
@@ -126,6 +126,8 @@ i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
 static void
 i915_ttm_place_from_region(const struct intel_memory_region *mr,
			    struct ttm_place *place,
+			    resource_size_t offset,
+			    resource_size_t size,
			    unsigned int flags)
 {
 	memset(place, 0, sizeof(*place));
@@ -133,7 +135,10 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
 	if (flags & I915_BO_ALLOC_CONTIGUOUS)
 		place->flags |= TTM_PL_FLAG_CONTIGUOUS;
-	if (mr->io_size && mr->io_size < mr->total) {
+	if (offset != I915_BO_INVALID_OFFSET) {
+		place->fpfn = offset >> PAGE_SHIFT;
+		place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
+	} else if (mr->io_size && mr->io_size < mr->total) {
 		if (flags & I915_BO_ALLOC_GPU_ONLY) {
 			place->flags |= TTM_PL_FLAG_TOPDOWN;
 		} else {
@@ -155,12 +160,14 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
 	placement->num_placement = 1;
 	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
-				   obj->mm.region, requested, flags);
+				   obj->mm.region, requested, obj->bo_offset,
+				   obj->base.size, flags);
 	/* Cache this on object? */
 	placement->num_busy_placement = num_allowed;
 	for (i = 0; i < placement->num_busy_placement; ++i)
-		i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
+		i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
+					   obj->bo_offset, obj->base.size, flags);
 	if (num_allowed == 0) {
 		*busy = *requested;
@@ -802,7 +809,8 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
 	struct ttm_placement placement;
 	int ret;
-	i915_ttm_place_from_region(mr, &requested, flags);
+	i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
+				   obj->base.size, flags);
 	placement.num_placement = 1;
 	placement.num_busy_placement = 1;
 	placement.placement = &requested;
@@ -1159,6 +1167,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+	obj->bo_offset = offset;
 	/* Don't put on a region list until we're either locked or fully initialized. */
 	obj->mm.region = mem;
 	INIT_LIST_HEAD(&obj->mm.region_link);
@@ -71,7 +71,8 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	GEM_BUG_ON(min_page_size < mm->chunk_size);
-	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+	if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
 		unsigned long pages;
 		size = roundup_pow_of_two(size);
@@ -12,6 +12,7 @@
 #include "intel_region_ttm.h"
+#include "gem/i915_gem_region.h"
 #include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
 /**
  * DOC: TTM support structure
@@ -191,6 +192,7 @@ intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
  */
 struct ttm_resource *
 intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+				resource_size_t offset,
 				resource_size_t size,
 				unsigned int flags)
 {
@@ -202,7 +204,10 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
 	if (flags & I915_BO_ALLOC_CONTIGUOUS)
 		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
-	if (mem->io_size && mem->io_size < mem->total) {
+	if (offset != I915_BO_INVALID_OFFSET) {
+		place.fpfn = offset >> PAGE_SHIFT;
+		place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
+	} else if (mem->io_size && mem->io_size < mem->total) {
 		if (flags & I915_BO_ALLOC_GPU_ONLY) {
 			place.flags |= TTM_PL_FLAG_TOPDOWN;
 		} else {
@@ -36,6 +36,7 @@ struct ttm_device_funcs *i915_ttm_driver(void);
 #ifdef CONFIG_DRM_I915_SELFTEST
 struct ttm_resource *
 intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+				resource_size_t offset,
 				resource_size_t size,
 				unsigned int flags);
 #endif
@@ -26,6 +26,7 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
 	int err;
 	obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
+						      obj->bo_offset,
						       obj->base.size,
						       obj->flags);
 	if (IS_ERR(obj->mm.res))
@@ -71,6 +72,8 @@ static int mock_object_init(struct intel_memory_region *mem,
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);
+	obj->bo_offset = offset;
 	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);