Commit 7ae03459 authored by Matthew Auld

drm/i915/ttm: add tt shmem backend

For cached objects we can allocate our pages directly in shmem. This
should make it possible (in a later patch) to utilise the existing
i915-gem shrinker code for such objects. For now this is still disabled.

v2(Thomas):
  - Add an optional try_to_writeback hook for objects. Importantly, we
    need to check whether the object is even still shrinkable; in between
    dropping the shrinker LRU lock and acquiring the object lock it could,
    for example, have been moved. We also need to differentiate between
    "lazy" shrinking and the immediate writeback mode, and later we will
    need to handle objects which don't even have mm.pages; bundling this
    into put_pages() would require somehow handling that edge case, so
    just letting the ttm backend handle everything in try_to_writeback
    doesn't seem too bad (see the sketch after this changelog).
v3(Thomas):
  - It is likely a bad idea to touch the object from the unpopulate hook,
    since it's not possible to hold a reference there without also
    creating a circular dependency, so this is probably too fragile. For
    now just ensure we at least mark the pages as dirty/accessed when
    called from the shrinker on WILLNEED objects.
  - s/try_to_writeback/shrinker_release_pages/, since this can do more
    than just writeback.
  - Get rid of the do_backup boolean and just set the SWAPPED flag prior
    to calling unpopulate.
  - Keep shmem_tt as the lowest priority for the TTM LRU bo_swapout walk,
    since these objects just get skipped anyway. We can try to come up
    with something better later.
v4(Thomas):
  - s/PCI_DMA/DMA/. Also drop NO_KERNEL_MAPPING and NO_WARN, which
    apparently do nothing for streaming mappings.
  - Just pass along the error for ->truncate, and assume nothing.
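
To make the shrinker_release_pages contract above concrete, here is a
minimal, hypothetical sketch of a backend hook; everything except the
hook signature and the helpers this patch exports (e.g.
__shmem_writeback) is an assumption, not the actual i915 implementation:

static int my_shmem_tt_shrinker_release_pages(struct drm_i915_gem_object *obj,
					      bool should_writeback)
{
	/*
	 * Between dropping the shrinker LRU lock and taking the object
	 * lock the object could e.g. have been moved, so re-check that
	 * it is still shrinkable before doing anything else.
	 */
	if (!i915_gem_object_is_shrinkable(obj))
		return -EBUSY;

	/*
	 * For WILLNEED objects the contents must be preserved, so at a
	 * minimum the pages should be marked dirty/accessed here before
	 * they are released back to shmem.
	 */

	/*
	 * "Lazy" shrinking only releases the pages; the immediate mode
	 * also starts writeback so they get swapped out right away.
	 */
	if (should_writeback)
		__shmem_writeback(obj->base.size, obj->base.filp->f_mapping);

	return 0;
}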
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Oak Zeng <oak.zeng@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-2-matthew.auld@intel.com
parent f05b985e
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -93,7 +93,6 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 /**
  * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
@@ -449,7 +448,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 }
 
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
 
 /**
@@ -613,6 +612,14 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
 bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
 					enum intel_memory_type type);
 
+struct sg_table *shmem_alloc_st(struct drm_i915_private *i915,
+				size_t size, struct intel_memory_region *mr,
+				struct address_space *mapping,
+				unsigned int max_segment);
+void shmem_free_st(struct sg_table *st, struct address_space *mapping,
+		   bool dirty, bool backup);
+void __shmem_writeback(size_t size, struct address_space *mapping);
+
 #ifdef CONFIG_MMU_NOTIFIER
 static inline bool
 i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -54,8 +54,10 @@ struct drm_i915_gem_object_ops {
 	int (*get_pages)(struct drm_i915_gem_object *obj);
 	void (*put_pages)(struct drm_i915_gem_object *obj,
 			  struct sg_table *pages);
-	void (*truncate)(struct drm_i915_gem_object *obj);
+	int (*truncate)(struct drm_i915_gem_object *obj);
 	void (*writeback)(struct drm_i915_gem_object *obj);
+	int (*shrinker_release_pages)(struct drm_i915_gem_object *obj,
+				      bool should_writeback);
 
 	int (*pread)(struct drm_i915_gem_object *obj,
 		     const struct drm_i915_gem_pread *arg);
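The two callbacks above are the whole interface change; a backend wires
them up roughly as follows (the my_backend_* names are hypothetical,
only the drm_i915_gem_object_ops fields come from this patch):

static int my_backend_truncate(struct drm_i915_gem_object *obj)
{
	/* Discard the backing store; truncate may now fail, hence int. */
	return 0;
}

static int my_backend_shrinker_release_pages(struct drm_i915_gem_object *obj,
					     bool should_writeback)
{
	/*
	 * Release the pages on behalf of the shrinker, optionally
	 * starting writeback when should_writeback is set.
	 */
	return 0;
}

static const struct drm_i915_gem_object_ops my_backend_ops = {
	.truncate = my_backend_truncate,
	.shrinker_release_pages = my_backend_shrinker_release_pages,
};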
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -158,11 +158,13 @@ int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
 }
 
 /* Immediately discard the backing storage */
-void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
+int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
 	drm_gem_free_mmap_offset(&obj->base);
 	if (obj->ops->truncate)
-		obj->ops->truncate(obj);
+		return obj->ops->truncate(obj);
+
+	return 0;
 }
 
 /* Try to discard unwanted pages */
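With the int return in place a call site can propagate the failure
instead of silently assuming the discard worked (hypothetical caller):

	int err = i915_gem_object_truncate(obj);

	if (err)
		return err; /* the backend could not discard the pages */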
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -25,8 +25,8 @@ static void check_release_pagevec(struct pagevec *pvec)
 	cond_resched();
 }
 
-static void shmem_free_st(struct sg_table *st, struct address_space *mapping,
-			  bool dirty, bool backup)
+void shmem_free_st(struct sg_table *st, struct address_space *mapping,
+		   bool dirty, bool backup)
 {
 	struct sgt_iter sgt_iter;
 	struct pagevec pvec;
@@ -52,10 +52,10 @@ static void shmem_free_st(struct sg_table *st, struct address_space *mapping,
 	kfree(st);
 }
 
-static struct sg_table *shmem_alloc_st(struct drm_i915_private *i915,
-				       size_t size, struct intel_memory_region *mr,
-				       struct address_space *mapping,
-				       unsigned int max_segment)
+struct sg_table *shmem_alloc_st(struct drm_i915_private *i915,
+				size_t size, struct intel_memory_region *mr,
+				struct address_space *mapping,
+				unsigned int max_segment)
 {
 	const unsigned long page_count = size / PAGE_SIZE;
 	unsigned long i;
@@ -271,7 +271,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-static void
+static int
 shmem_truncate(struct drm_i915_gem_object *obj)
 {
 	/*
@@ -283,9 +283,11 @@ shmem_truncate(struct drm_i915_gem_object *obj)
 	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
 	obj->mm.madv = __I915_MADV_PURGED;
 	obj->mm.pages = ERR_PTR(-EFAULT);
+
+	return 0;
 }
 
-static void __shmem_writeback(size_t size, struct address_space *mapping)
+void __shmem_writeback(size_t size, struct address_space *mapping)
 {
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
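Since shmem_alloc_st(), shmem_free_st() and __shmem_writeback() are no
longer static, another backend can reuse them along these lines (a
sketch only; obj, i915 and the surrounding error handling are assumed
context, not part of this patch):

	struct address_space *mapping = obj->base.filp->f_mapping;
	struct sg_table *st;

	st = shmem_alloc_st(i915, obj->base.size, obj->mm.region,
			    mapping, i915_sg_segment_size());
	if (IS_ERR(st))
		return PTR_ERR(st);

	/* ... the object now uses the shmem-backed pages ... */

	/* Flush dirty data out to swap, then drop the pages. */
	__shmem_writeback(obj->base.size, mapping);
	shmem_free_st(st, mapping, true /* dirty */, true /* backup */);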
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -56,19 +56,24 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
 	return false;
 }
 
-static void try_to_writeback(struct drm_i915_gem_object *obj,
-			     unsigned int flags)
+static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
 {
+	if (obj->ops->shrinker_release_pages)
+		return obj->ops->shrinker_release_pages(obj,
+							flags & I915_SHRINK_WRITEBACK);
+
 	switch (obj->mm.madv) {
 	case I915_MADV_DONTNEED:
 		i915_gem_object_truncate(obj);
-		return;
+		return 0;
 	case __I915_MADV_PURGED:
-		return;
+		return 0;
 	}
 
 	if (flags & I915_SHRINK_WRITEBACK)
 		i915_gem_object_writeback(obj);
+
+	return 0;
 }
 
 /**
@@ -222,8 +227,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 			}
 
 			if (!__i915_gem_object_put_pages(obj)) {
-				try_to_writeback(obj, shrink);
-				count += obj->base.size >> PAGE_SHIFT;
+				if (!try_to_writeback(obj, shrink))
+					count += obj->base.size >> PAGE_SHIFT;
 			}
 			if (!ww)
 				i915_gem_object_unlock(obj);
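Finally, whether should_writeback is set follows from the flags passed
to i915_gem_shrink(); a hypothetical call requesting immediate
writeback of everything reclaimable would look like:

	unsigned long nr_scanned = 0;

	/* Reclaim as much as possible and swap the pages out right away. */
	i915_gem_shrink(NULL, i915, -1UL, &nr_scanned,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_WRITEBACK);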
[The remainder of the diff is collapsed.]