Commit a4f5ea64 authored by Chris Wilson

drm/i915: Refactor object page API

The plan is to make obtaining the backing storage for the object avoid
struct_mutex (i.e. use its own locking). The first step is to update the
API so that normal users only call pin/unpin whilst working on the
backing storage.
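
A minimal sketch of the calling-convention change, distilled from the hunks below (kernel context assumed; error handling abbreviated; only helpers visible in this patch are used):

    /* Before: acquiring the backing store and pinning it were separate calls,
     * paired manually by every caller under struct_mutex.
     */
    ret = i915_gem_object_get_pages(obj);
    if (ret)
            return ret;
    i915_gem_object_pin_pages(obj);
    /* ... operate on obj->pages ... */
    i915_gem_object_unpin_pages(obj);

    /* After: a single pin call acquires the pages on first use and takes the
     * pin count; page state moves under obj->mm (pages, mapping, madv, dirty).
     */
    ret = i915_gem_object_pin_pages(obj);
    if (ret)
            return ret;
    /* ... operate on obj->mm.pages ... */
    i915_gem_object_unpin_pages(obj);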
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-12-chris@chris-wilson.co.uk
parent d2a84a76
...@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, ...@@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
} }
if (ret == 0 && needs_clflush_after) if (ret == 0 && needs_clflush_after)
drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len); drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj); i915_gem_object_unpin_map(shadow_batch_obj);
return ret; return ret;
......
...@@ -112,7 +112,7 @@ static char get_global_flag(struct drm_i915_gem_object *obj) ...@@ -112,7 +112,7 @@ static char get_global_flag(struct drm_i915_gem_object *obj)
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{ {
return obj->mapping ? 'M' : ' '; return obj->mm.mapping ? 'M' : ' ';
} }
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
...@@ -158,8 +158,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) ...@@ -158,8 +158,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
i915_gem_active_get_seqno(&obj->last_write, i915_gem_active_get_seqno(&obj->last_write,
&obj->base.dev->struct_mutex), &obj->base.dev->struct_mutex),
i915_cache_level_str(dev_priv, obj->cache_level), i915_cache_level_str(dev_priv, obj->cache_level),
obj->dirty ? " dirty" : "", obj->mm.dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name) if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name); seq_printf(m, " (name: %d)", obj->base.name);
list_for_each_entry(vma, &obj->vma_list, obj_link) { list_for_each_entry(vma, &obj->vma_list, obj_link) {
...@@ -403,12 +403,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data) ...@@ -403,12 +403,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size += obj->base.size; size += obj->base.size;
++count; ++count;
if (obj->madv == I915_MADV_DONTNEED) { if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size; purgeable_size += obj->base.size;
++purgeable_count; ++purgeable_count;
} }
if (obj->mapping) { if (obj->mm.mapping) {
mapped_count++; mapped_count++;
mapped_size += obj->base.size; mapped_size += obj->base.size;
} }
...@@ -425,12 +425,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data) ...@@ -425,12 +425,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
++dpy_count; ++dpy_count;
} }
if (obj->madv == I915_MADV_DONTNEED) { if (obj->mm.madv == I915_MADV_DONTNEED) {
purgeable_size += obj->base.size; purgeable_size += obj->base.size;
++purgeable_count; ++purgeable_count;
} }
if (obj->mapping) { if (obj->mm.mapping) {
mapped_count++; mapped_count++;
mapped_size += obj->base.size; mapped_size += obj->base.size;
} }
...@@ -2028,7 +2028,7 @@ static void i915_dump_lrc_obj(struct seq_file *m, ...@@ -2028,7 +2028,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
seq_printf(m, "\tBound in GGTT at 0x%08x\n", seq_printf(m, "\tBound in GGTT at 0x%08x\n",
i915_ggtt_offset(vma)); i915_ggtt_offset(vma));
if (i915_gem_object_get_pages(vma->obj)) { if (i915_gem_object_pin_pages(vma->obj)) {
seq_puts(m, "\tFailed to get pages for context object\n\n"); seq_puts(m, "\tFailed to get pages for context object\n\n");
return; return;
} }
...@@ -2047,6 +2047,7 @@ static void i915_dump_lrc_obj(struct seq_file *m, ...@@ -2047,6 +2047,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
kunmap_atomic(reg_state); kunmap_atomic(reg_state);
} }
i915_gem_object_unpin_pages(vma->obj);
seq_putc(m, '\n'); seq_putc(m, '\n');
} }
......
...@@ -2252,17 +2252,6 @@ struct drm_i915_gem_object { ...@@ -2252,17 +2252,6 @@ struct drm_i915_gem_object {
*/ */
#define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES) #define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)
/**
* This is set if the object has been written to since last bound
* to the GTT
*/
unsigned int dirty:1;
/**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;
/* /*
* Is the object to be mapped as read-only to the GPU * Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit * Only honoured if hardware has relevant pte bit
...@@ -2284,8 +2273,12 @@ struct drm_i915_gem_object { ...@@ -2284,8 +2273,12 @@ struct drm_i915_gem_object {
unsigned int bind_count; unsigned int bind_count;
unsigned int pin_display; unsigned int pin_display;
struct {
unsigned int pages_pin_count;
struct sg_table *pages; struct sg_table *pages;
int pages_pin_count; void *mapping;
struct i915_gem_object_page_iter { struct i915_gem_object_page_iter {
struct scatterlist *sg_pos; struct scatterlist *sg_pos;
unsigned int sg_idx; /* in pages, but 32bit eek! */ unsigned int sg_idx; /* in pages, but 32bit eek! */
...@@ -2293,7 +2286,18 @@ struct drm_i915_gem_object { ...@@ -2293,7 +2286,18 @@ struct drm_i915_gem_object {
struct radix_tree_root radix; struct radix_tree_root radix;
struct mutex lock; /* protects this cache */ struct mutex lock; /* protects this cache */
} get_page; } get_page;
void *mapping;
/**
* Advice: are the backing pages purgeable?
*/
unsigned int madv:2;
/**
* This is set if the object has been written to since the
* pages were last acquired.
*/
bool dirty:1;
} mm;
/** Breadcrumb of last rendering to the buffer. /** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers. * There can only be one writer, but we allow for multiple readers.
...@@ -3182,14 +3186,11 @@ void i915_vma_close(struct i915_vma *vma); ...@@ -3182,14 +3186,11 @@ void i915_vma_close(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma); void i915_vma_destroy(struct i915_vma *vma);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj); int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); static inline int __sg_page_count(const struct scatterlist *sg)
static inline int __sg_page_count(struct scatterlist *sg)
{ {
return sg->length >> PAGE_SHIFT; return sg->length >> PAGE_SHIFT;
} }
...@@ -3210,19 +3211,52 @@ dma_addr_t ...@@ -3210,19 +3211,52 @@ dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n); unsigned long n);
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->mm.pages_pin_count++)
return 0;
return __i915_gem_object_get_pages(obj);
}
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!obj->mm.pages);
obj->mm.pages_pin_count++;
}
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{ {
GEM_BUG_ON(obj->pages == NULL); return obj->mm.pages_pin_count;
obj->pages_pin_count++; }
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(!obj->mm.pages);
obj->mm.pages_pin_count--;
GEM_BUG_ON(obj->mm.pages_pin_count < obj->bind_count);
} }
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{ {
GEM_BUG_ON(obj->pages_pin_count == 0); __i915_gem_object_unpin_pages(obj);
obj->pages_pin_count--;
GEM_BUG_ON(obj->pages_pin_count < obj->bind_count);
} }
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
enum i915_map_type { enum i915_map_type {
I915_MAP_WB = 0, I915_MAP_WB = 0,
I915_MAP_WC, I915_MAP_WC,
......
...@@ -216,7 +216,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) ...@@ -216,7 +216,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_address(sg) = obj->phys_handle->busaddr; sg_dma_address(sg) = obj->phys_handle->busaddr;
sg_dma_len(sg) = obj->base.size; sg_dma_len(sg) = obj->base.size;
obj->pages = st; obj->mm.pages = st;
return 0; return 0;
} }
...@@ -225,7 +225,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) ...@@ -225,7 +225,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{ {
int ret; int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED); GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true); ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (WARN_ON(ret)) { if (WARN_ON(ret)) {
...@@ -235,10 +235,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) ...@@ -235,10 +235,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
} }
if (obj->madv == I915_MADV_DONTNEED) if (obj->mm.madv == I915_MADV_DONTNEED)
obj->dirty = 0; obj->mm.dirty = false;
if (obj->dirty) { if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping; struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr; char *vaddr = obj->phys_handle->vaddr;
int i; int i;
...@@ -257,22 +257,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) ...@@ -257,22 +257,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
kunmap_atomic(dst); kunmap_atomic(dst);
set_page_dirty(page); set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED) if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page); mark_page_accessed(page);
put_page(page); put_page(page);
vaddr += PAGE_SIZE; vaddr += PAGE_SIZE;
} }
obj->dirty = 0; obj->mm.dirty = false;
} }
sg_free_table(obj->pages); sg_free_table(obj->mm.pages);
kfree(obj->pages); kfree(obj->mm.pages);
} }
static void static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj) i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{ {
drm_pci_free(obj->base.dev, obj->phys_handle); drm_pci_free(obj->base.dev, obj->phys_handle);
i915_gem_object_unpin_pages(obj);
} }
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
...@@ -507,7 +508,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, ...@@ -507,7 +508,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
return 0; return 0;
} }
if (obj->madv != I915_MADV_WILLNEED) if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT; return -EFAULT;
if (obj->base.filp == NULL) if (obj->base.filp == NULL)
...@@ -517,7 +518,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, ...@@ -517,7 +518,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
ret = i915_gem_object_put_pages(obj); ret = __i915_gem_object_put_pages(obj);
if (ret) if (ret)
return ret; return ret;
...@@ -529,7 +530,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, ...@@ -529,7 +530,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
obj->phys_handle = phys; obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops; obj->ops = &i915_gem_phys_ops;
return i915_gem_object_get_pages(obj); return i915_gem_object_pin_pages(obj);
} }
static int static int
...@@ -725,12 +726,10 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, ...@@ -725,12 +726,10 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
return ret; return ret;
i915_gem_object_pin_pages(obj);
i915_gem_object_flush_gtt_write_domain(obj); i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu read domain, set ourself into the gtt /* If we're not in the cpu read domain, set ourself into the gtt
...@@ -778,12 +777,10 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, ...@@ -778,12 +777,10 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
return ret; return ret;
i915_gem_object_pin_pages(obj);
i915_gem_object_flush_gtt_write_domain(obj); i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu write domain, set ourself into the /* If we're not in the cpu write domain, set ourself into the
...@@ -813,7 +810,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, ...@@ -813,7 +810,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
obj->cache_dirty = true; obj->cache_dirty = true;
intel_fb_obj_invalidate(obj, ORIGIN_CPU); intel_fb_obj_invalidate(obj, ORIGIN_CPU);
obj->dirty = 1; obj->mm.dirty = true;
/* return with the pages pinned */ /* return with the pages pinned */
return 0; return 0;
...@@ -951,13 +948,11 @@ i915_gem_gtt_pread(struct drm_device *dev, ...@@ -951,13 +948,11 @@ i915_gem_gtt_pread(struct drm_device *dev,
if (ret) if (ret)
goto out; goto out;
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) { if (ret) {
remove_mappable_node(&node); remove_mappable_node(&node);
goto out; goto out;
} }
i915_gem_object_pin_pages(obj);
} }
ret = i915_gem_object_set_to_gtt_domain(obj, false); ret = i915_gem_object_set_to_gtt_domain(obj, false);
...@@ -1064,7 +1059,7 @@ i915_gem_shmem_pread(struct drm_device *dev, ...@@ -1064,7 +1059,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
offset = args->offset; offset = args->offset;
remain = args->size; remain = args->size;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, for_each_sg_page(obj->mm.pages->sgl, &sg_iter, obj->mm.pages->nents,
offset >> PAGE_SHIFT) { offset >> PAGE_SHIFT) {
struct page *page = sg_page_iter_page(&sg_iter); struct page *page = sg_page_iter_page(&sg_iter);
...@@ -1254,13 +1249,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, ...@@ -1254,13 +1249,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
if (ret) if (ret)
goto out; goto out;
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) { if (ret) {
remove_mappable_node(&node); remove_mappable_node(&node);
goto out; goto out;
} }
i915_gem_object_pin_pages(obj);
} }
ret = i915_gem_object_set_to_gtt_domain(obj, true); ret = i915_gem_object_set_to_gtt_domain(obj, true);
...@@ -1268,7 +1261,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, ...@@ -1268,7 +1261,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
goto out_unpin; goto out_unpin;
intel_fb_obj_invalidate(obj, ORIGIN_CPU); intel_fb_obj_invalidate(obj, ORIGIN_CPU);
obj->dirty = true; obj->mm.dirty = true;
user_data = u64_to_user_ptr(args->data_ptr); user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset; offset = args->offset;
...@@ -1439,7 +1432,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ...@@ -1439,7 +1432,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
offset = args->offset; offset = args->offset;
remain = args->size; remain = args->size;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, for_each_sg_page(obj->mm.pages->sgl, &sg_iter, obj->mm.pages->nents,
offset >> PAGE_SHIFT) { offset >> PAGE_SHIFT) {
struct page *page = sg_page_iter_page(&sg_iter); struct page *page = sg_page_iter_page(&sg_iter);
int partial_cacheline_write; int partial_cacheline_write;
...@@ -2266,7 +2259,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) ...@@ -2266,7 +2259,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
* backing pages, *now*. * backing pages, *now*.
*/ */
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED; obj->mm.madv = __I915_MADV_PURGED;
} }
/* Try to discard unwanted pages */ /* Try to discard unwanted pages */
...@@ -2275,7 +2268,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj) ...@@ -2275,7 +2268,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{ {
struct address_space *mapping; struct address_space *mapping;
switch (obj->madv) { switch (obj->mm.madv) {
case I915_MADV_DONTNEED: case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj); i915_gem_object_truncate(obj);
case __I915_MADV_PURGED: case __I915_MADV_PURGED:
...@@ -2296,7 +2289,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2296,7 +2289,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
struct page *page; struct page *page;
int ret; int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED); GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true); ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (WARN_ON(ret)) { if (WARN_ON(ret)) {
...@@ -2312,22 +2305,22 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2312,22 +2305,22 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (i915_gem_object_needs_bit17_swizzle(obj)) if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj); i915_gem_object_save_bit_17_swizzle(obj);
if (obj->madv == I915_MADV_DONTNEED) if (obj->mm.madv == I915_MADV_DONTNEED)
obj->dirty = 0; obj->mm.dirty = false;
for_each_sgt_page(page, sgt_iter, obj->pages) { for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
if (obj->dirty) if (obj->mm.dirty)
set_page_dirty(page); set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED) if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page); mark_page_accessed(page);
put_page(page); put_page(page);
} }
obj->dirty = 0; obj->mm.dirty = false;
sg_free_table(obj->pages); sg_free_table(obj->mm.pages);
kfree(obj->pages); kfree(obj->mm.pages);
} }
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
...@@ -2335,21 +2328,20 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) ...@@ -2335,21 +2328,20 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
struct radix_tree_iter iter; struct radix_tree_iter iter;
void **slot; void **slot;
radix_tree_for_each_slot(slot, &obj->get_page.radix, &iter, 0) radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->get_page.radix, iter.index); radix_tree_delete(&obj->mm.get_page.radix, iter.index);
} }
int int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{ {
const struct drm_i915_gem_object_ops *ops = obj->ops; const struct drm_i915_gem_object_ops *ops = obj->ops;
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->pages == NULL) if (!obj->mm.pages)
return 0; return 0;
if (obj->pages_pin_count) if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY; return -EBUSY;
GEM_BUG_ON(obj->bind_count); GEM_BUG_ON(obj->bind_count);
...@@ -2359,22 +2351,22 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) ...@@ -2359,22 +2351,22 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
* lists early. */ * lists early. */
list_del(&obj->global_list); list_del(&obj->global_list);
if (obj->mapping) { if (obj->mm.mapping) {
void *ptr; void *ptr;
ptr = ptr_mask_bits(obj->mapping); ptr = ptr_mask_bits(obj->mm.mapping);
if (is_vmalloc_addr(ptr)) if (is_vmalloc_addr(ptr))
vunmap(ptr); vunmap(ptr);
else else
kunmap(kmap_to_page(ptr)); kunmap(kmap_to_page(ptr));
obj->mapping = NULL; obj->mm.mapping = NULL;
} }
__i915_gem_object_reset_page_iter(obj); __i915_gem_object_reset_page_iter(obj);
ops->put_pages(obj); ops->put_pages(obj);
obj->pages = NULL; obj->mm.pages = NULL;
i915_gem_object_invalidate(obj); i915_gem_object_invalidate(obj);
...@@ -2474,7 +2466,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2474,7 +2466,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
} }
if (sg) /* loop terminated early; short sg table */ if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg); sg_mark_end(sg);
obj->pages = st; obj->mm.pages = st;
ret = i915_gem_gtt_prepare_object(obj); ret = i915_gem_gtt_prepare_object(obj);
if (ret) if (ret)
...@@ -2485,7 +2477,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2485,7 +2477,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_tiled(obj) && if (i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
i915_gem_object_pin_pages(obj); __i915_gem_object_pin_pages(obj);
return 0; return 0;
...@@ -2517,8 +2509,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2517,8 +2509,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* either as a result of memory pressure (reaping pages under the shrinker) * either as a result of memory pressure (reaping pages under the shrinker)
* or as the object is itself released. * or as the object is itself released.
*/ */
int int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
const struct drm_i915_gem_object_ops *ops = obj->ops; const struct drm_i915_gem_object_ops *ops = obj->ops;
...@@ -2526,24 +2517,25 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) ...@@ -2526,24 +2517,25 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->pages) if (obj->mm.pages)
return 0; return 0;
if (obj->madv != I915_MADV_WILLNEED) { if (obj->mm.madv != I915_MADV_WILLNEED) {
DRM_DEBUG("Attempting to obtain a purgeable object\n"); DRM_DEBUG("Attempting to obtain a purgeable object\n");
__i915_gem_object_unpin_pages(obj);
return -EFAULT; return -EFAULT;
} }
BUG_ON(obj->pages_pin_count);
ret = ops->get_pages(obj); ret = ops->get_pages(obj);
if (ret) if (ret) {
__i915_gem_object_unpin_pages(obj);
return ret; return ret;
}
list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list); list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
obj->get_page.sg_pos = obj->pages->sgl; obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
obj->get_page.sg_idx = 0; obj->mm.get_page.sg_idx = 0;
return 0; return 0;
} }
...@@ -2553,7 +2545,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, ...@@ -2553,7 +2545,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
enum i915_map_type type) enum i915_map_type type)
{ {
unsigned long n_pages = obj->base.size >> PAGE_SHIFT; unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->pages; struct sg_table *sgt = obj->mm.pages;
struct sgt_iter sgt_iter; struct sgt_iter sgt_iter;
struct page *page; struct page *page;
struct page *stack_pages[32]; struct page *stack_pages[32];
...@@ -2607,14 +2599,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, ...@@ -2607,14 +2599,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
i915_gem_object_pin_pages(obj); pinned = obj->mm.pages_pin_count > 1;
pinned = obj->pages_pin_count > 1;
ptr = ptr_unpack_bits(obj->mapping, has_type); ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
if (ptr && has_type != type) { if (ptr && has_type != type) {
if (pinned) { if (pinned) {
ret = -EBUSY; ret = -EBUSY;
...@@ -2626,7 +2617,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, ...@@ -2626,7 +2617,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
else else
kunmap(kmap_to_page(ptr)); kunmap(kmap_to_page(ptr));
ptr = obj->mapping = NULL; ptr = obj->mm.mapping = NULL;
} }
if (!ptr) { if (!ptr) {
...@@ -2636,7 +2627,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, ...@@ -2636,7 +2627,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto err; goto err;
} }
obj->mapping = ptr_pack_bits(ptr, type); obj->mm.mapping = ptr_pack_bits(ptr, type);
} }
return ptr; return ptr;
...@@ -3087,7 +3078,7 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -3087,7 +3078,7 @@ int i915_vma_unbind(struct i915_vma *vma)
goto destroy; goto destroy;
GEM_BUG_ON(obj->bind_count == 0); GEM_BUG_ON(obj->bind_count == 0);
GEM_BUG_ON(!obj->pages); GEM_BUG_ON(!obj->mm.pages);
if (i915_vma_is_map_and_fenceable(vma)) { if (i915_vma_is_map_and_fenceable(vma)) {
/* release the fence reg _after_ flushing */ /* release the fence reg _after_ flushing */
...@@ -3111,7 +3102,7 @@ int i915_vma_unbind(struct i915_vma *vma) ...@@ -3111,7 +3102,7 @@ int i915_vma_unbind(struct i915_vma *vma)
drm_mm_remove_node(&vma->node); drm_mm_remove_node(&vma->node);
list_move_tail(&vma->vm_link, &vma->vm->unbound_list); list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
if (vma->pages != obj->pages) { if (vma->pages != obj->mm.pages) {
GEM_BUG_ON(!vma->pages); GEM_BUG_ON(!vma->pages);
sg_free_table(vma->pages); sg_free_table(vma->pages);
kfree(vma->pages); kfree(vma->pages);
...@@ -3244,12 +3235,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) ...@@ -3244,12 +3235,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
return -E2BIG; return -E2BIG;
} }
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
return ret; return ret;
i915_gem_object_pin_pages(obj);
if (flags & PIN_OFFSET_FIXED) { if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK; u64 offset = flags & PIN_OFFSET_MASK;
if (offset & (alignment - 1) || offset > end - size) { if (offset & (alignment - 1) || offset > end - size) {
...@@ -3331,7 +3320,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, ...@@ -3331,7 +3320,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* to GPU, and we can ignore the cache flush because it'll happen * to GPU, and we can ignore the cache flush because it'll happen
* again at bind time. * again at bind time.
*/ */
if (obj->pages == NULL) if (!obj->mm.pages)
return false; return false;
/* /*
...@@ -3355,7 +3344,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, ...@@ -3355,7 +3344,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
} }
trace_i915_gem_object_clflush(obj); trace_i915_gem_object_clflush(obj);
drm_clflush_sg(obj->pages); drm_clflush_sg(obj->mm.pages);
obj->cache_dirty = false; obj->cache_dirty = false;
return true; return true;
...@@ -3469,7 +3458,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -3469,7 +3458,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* continue to assume that the obj remained out of the CPU cached * continue to assume that the obj remained out of the CPU cached
* domain. * domain.
*/ */
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
return ret; return ret;
...@@ -3493,7 +3482,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -3493,7 +3482,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (write) { if (write) {
obj->base.read_domains = I915_GEM_DOMAIN_GTT; obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->base.write_domain = I915_GEM_DOMAIN_GTT; obj->base.write_domain = I915_GEM_DOMAIN_GTT;
obj->dirty = 1; obj->mm.dirty = true;
} }
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
...@@ -3502,6 +3491,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -3502,6 +3491,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */ /* And bump the LRU for this access */
i915_gem_object_bump_inactive_ggtt(obj); i915_gem_object_bump_inactive_ggtt(obj);
i915_gem_object_unpin_pages(obj);
return 0; return 0;
} }
...@@ -4304,23 +4294,23 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, ...@@ -4304,23 +4294,23 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
goto unlock; goto unlock;
} }
if (obj->pages && if (obj->mm.pages &&
i915_gem_object_is_tiled(obj) && i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->madv == I915_MADV_WILLNEED) if (obj->mm.madv == I915_MADV_WILLNEED)
i915_gem_object_unpin_pages(obj); __i915_gem_object_unpin_pages(obj);
if (args->madv == I915_MADV_WILLNEED) if (args->madv == I915_MADV_WILLNEED)
i915_gem_object_pin_pages(obj); __i915_gem_object_pin_pages(obj);
} }
if (obj->madv != __I915_MADV_PURGED) if (obj->mm.madv != __I915_MADV_PURGED)
obj->madv = args->madv; obj->mm.madv = args->madv;
/* if the object is no longer attached, discard its backing storage */ /* if the object is no longer attached, discard its backing storage */
if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL) if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
i915_gem_object_truncate(obj); i915_gem_object_truncate(obj);
args->retained = obj->madv != __I915_MADV_PURGED; args->retained = obj->mm.madv != __I915_MADV_PURGED;
i915_gem_object_put(obj); i915_gem_object_put(obj);
unlock: unlock:
...@@ -4347,9 +4337,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, ...@@ -4347,9 +4337,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->ops = ops; obj->ops = ops;
obj->frontbuffer_ggtt_origin = ORIGIN_GTT; obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
obj->madv = I915_MADV_WILLNEED;
INIT_RADIX_TREE(&obj->get_page.radix, GFP_KERNEL | __GFP_NOWARN); obj->mm.madv = I915_MADV_WILLNEED;
mutex_init(&obj->get_page.lock); INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
} }
...@@ -4441,7 +4432,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj) ...@@ -4441,7 +4432,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
* back the contents from the GPU. * back the contents from the GPU.
*/ */
if (obj->madv != I915_MADV_WILLNEED) if (obj->mm.madv != I915_MADV_WILLNEED)
return false; return false;
if (obj->base.filp == NULL) if (obj->base.filp == NULL)
...@@ -4483,32 +4474,27 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) ...@@ -4483,32 +4474,27 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
} }
GEM_BUG_ON(obj->bind_count); GEM_BUG_ON(obj->bind_count);
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
WARN_ON(atomic_read(&obj->frontbuffer_bits)); WARN_ON(atomic_read(&obj->frontbuffer_bits));
if (obj->pages && obj->madv == I915_MADV_WILLNEED && if (obj->mm.pages && obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES && dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
i915_gem_object_is_tiled(obj)) i915_gem_object_is_tiled(obj))
i915_gem_object_unpin_pages(obj); __i915_gem_object_unpin_pages(obj);
if (WARN_ON(obj->pages_pin_count)) if (obj->ops->release)
obj->pages_pin_count = 0; obj->ops->release(obj);
if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
obj->mm.pages_pin_count = 0;
if (discard_backing_storage(obj)) if (discard_backing_storage(obj))
obj->madv = I915_MADV_DONTNEED; obj->mm.madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj); __i915_gem_object_put_pages(obj);
BUG_ON(obj->pages); GEM_BUG_ON(obj->mm.pages);
if (obj->base.import_attach) if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL); drm_prime_gem_destroy(&obj->base, NULL);
if (obj->ops->release)
obj->ops->release(obj);
drm_gem_object_release(&obj->base); drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size); i915_gem_info_remove_obj(dev_priv, obj->base.size);
...@@ -5063,14 +5049,13 @@ i915_gem_object_create_from_data(struct drm_device *dev, ...@@ -5063,14 +5049,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
if (ret) if (ret)
goto fail; goto fail;
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
goto fail; goto fail;
i915_gem_object_pin_pages(obj); sg = obj->mm.pages;
sg = obj->pages;
bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size); bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
obj->dirty = 1; /* Backing store is now out of date */ obj->mm.dirty = true; /* Backing store is now out of date */
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
if (WARN_ON(bytes != size)) { if (WARN_ON(bytes != size)) {
...@@ -5091,13 +5076,13 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj, ...@@ -5091,13 +5076,13 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
unsigned int n, unsigned int n,
unsigned int *offset) unsigned int *offset)
{ {
struct i915_gem_object_page_iter *iter = &obj->get_page; struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
struct scatterlist *sg; struct scatterlist *sg;
unsigned int idx, count; unsigned int idx, count;
might_sleep(); might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
GEM_BUG_ON(obj->pages_pin_count == 0); GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
/* As we iterate forward through the sg, we record each entry in a /* As we iterate forward through the sg, we record each entry in a
* radixtree for quick repeated (backwards) lookups. If we have seen * radixtree for quick repeated (backwards) lookups. If we have seen
...@@ -5222,7 +5207,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, ...@@ -5222,7 +5207,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
struct page *page; struct page *page;
page = i915_gem_object_get_page(obj, n); page = i915_gem_object_get_page(obj, n);
if (!obj->dirty) if (!obj->mm.dirty)
set_page_dirty(page); set_page_dirty(page);
return page; return page;
......
...@@ -130,11 +130,10 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, ...@@ -130,11 +130,10 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
return obj; return obj;
} }
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
list_move_tail(&obj->batch_pool_link, list); list_move_tail(&obj->batch_pool_link, list);
i915_gem_object_pin_pages(obj);
return obj; return obj;
} }
...@@ -48,12 +48,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme ...@@ -48,12 +48,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
if (ret) if (ret)
goto err; goto err;
ret = i915_gem_object_get_pages(obj); ret = i915_gem_object_pin_pages(obj);
if (ret) if (ret)
goto err_unlock; goto err_unlock;
i915_gem_object_pin_pages(obj);
/* Copy sg so that we make an independent mapping */ /* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) { if (st == NULL) {
...@@ -61,13 +59,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme ...@@ -61,13 +59,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
goto err_unpin; goto err_unpin;
} }
ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
if (ret) if (ret)
goto err_free; goto err_free;
src = obj->pages->sgl; src = obj->mm.pages->sgl;
dst = st->sgl; dst = st->sgl;
for (i = 0; i < obj->pages->nents; i++) { for (i = 0; i < obj->mm.pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0); sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst); dst = sg_next(dst);
src = sg_next(src); src = sg_next(src);
...@@ -299,14 +297,14 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) ...@@ -299,14 +297,14 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
if (IS_ERR(sg)) if (IS_ERR(sg))
return PTR_ERR(sg); return PTR_ERR(sg);
obj->pages = sg; obj->mm.pages = sg;
return 0; return 0;
} }
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{ {
dma_buf_unmap_attachment(obj->base.import_attach, dma_buf_unmap_attachment(obj->base.import_attach,
obj->pages, DMA_BIDIRECTIONAL); obj->mm.pages, DMA_BIDIRECTIONAL);
} }
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
......
...@@ -1281,7 +1281,7 @@ void i915_vma_move_to_active(struct i915_vma *vma, ...@@ -1281,7 +1281,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
obj->dirty = 1; /* be paranoid */ obj->mm.dirty = true; /* be paranoid */
/* Add a reference if we're newly entering the active list. /* Add a reference if we're newly entering the active list.
* The order in which we add operations to the retirement queue is * The order in which we add operations to the retirement queue is
......
...@@ -664,7 +664,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -664,7 +664,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return; return;
i = 0; i = 0;
for_each_sgt_page(page, sgt_iter, obj->pages) { for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
char new_bit_17 = page_to_phys(page) >> 17; char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) { (test_bit(i, obj->bit_17) != 0)) {
...@@ -703,7 +703,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -703,7 +703,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i = 0; i = 0;
for_each_sgt_page(page, sgt_iter, obj->pages) { for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
if (page_to_phys(page) & (1 << 17)) if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17); __set_bit(i, obj->bit_17);
else else
......
...@@ -175,7 +175,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma, ...@@ -175,7 +175,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{ {
u32 pte_flags = 0; u32 pte_flags = 0;
vma->pages = vma->obj->pages; vma->pages = vma->obj->mm.pages;
/* Currently applicable only to VLV */ /* Currently applicable only to VLV */
if (vma->obj->gt_ro) if (vma->obj->gt_ro)
...@@ -2373,7 +2373,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) ...@@ -2373,7 +2373,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{ {
if (!dma_map_sg(&obj->base.dev->pdev->dev, if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents, obj->mm.pages->sgl, obj->mm.pages->nents,
PCI_DMA_BIDIRECTIONAL)) PCI_DMA_BIDIRECTIONAL))
return -ENOSPC; return -ENOSPC;
...@@ -2710,7 +2710,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) ...@@ -2710,7 +2710,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
} }
} }
dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents, dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
PCI_DMA_BIDIRECTIONAL); PCI_DMA_BIDIRECTIONAL);
} }
...@@ -3548,7 +3548,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info, ...@@ -3548,7 +3548,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */ /* Populate source page list from the object. */
i = 0; i = 0;
for_each_sgt_dma(dma_addr, sgt_iter, obj->pages) for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
page_addr_list[i++] = dma_addr; page_addr_list[i++] = dma_addr;
GEM_BUG_ON(i != n_pages); GEM_BUG_ON(i != n_pages);
...@@ -3641,7 +3641,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) ...@@ -3641,7 +3641,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return 0; return 0;
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
vma->pages = vma->obj->pages; vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages = vma->pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj); intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
......
...@@ -102,10 +102,10 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) ...@@ -102,10 +102,10 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
sg = __sg_next(sg); sg = __sg_next(sg);
} while (1); } while (1);
obj->pages = st; obj->mm.pages = st;
if (i915_gem_gtt_prepare_object(obj)) { if (i915_gem_gtt_prepare_object(obj)) {
obj->pages = NULL; obj->mm.pages = NULL;
goto err; goto err;
} }
...@@ -114,7 +114,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) ...@@ -114,7 +114,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
* and the caller is expected to repopulate - the contents of this * and the caller is expected to repopulate - the contents of this
* object are only valid whilst active and pinned. * object are only valid whilst active and pinned.
*/ */
obj->madv = I915_MADV_DONTNEED; obj->mm.madv = I915_MADV_DONTNEED;
return 0; return 0;
err: err:
...@@ -126,10 +126,10 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) ...@@ -126,10 +126,10 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj)
{ {
i915_gem_gtt_finish_object(obj); i915_gem_gtt_finish_object(obj);
internal_free_pages(obj->pages); internal_free_pages(obj->mm.pages);
obj->dirty = 0; obj->mm.dirty = false;
obj->madv = I915_MADV_WILLNEED; obj->mm.madv = I915_MADV_WILLNEED;
} }
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
......
...@@ -230,7 +230,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req) ...@@ -230,7 +230,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
return 0; return 0;
/* Recreate the page after shrinking */ /* Recreate the page after shrinking */
if (!so->vma->obj->pages) if (!so->vma->obj->mm.pages)
so->batch_offset = -1; so->batch_offset = -1;
ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH); ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
......
...@@ -78,7 +78,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) ...@@ -78,7 +78,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed * to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves. * in releasing our pin count on the pages themselves.
*/ */
if (obj->pages_pin_count > obj->bind_count) if (obj->mm.pages_pin_count > obj->bind_count)
return false; return false;
if (any_vma_pinned(obj)) if (any_vma_pinned(obj))
...@@ -88,7 +88,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) ...@@ -88,7 +88,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* discard the contents (because the user has marked them as being * discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap. * purgeable) or if we can move their contents out to swap.
*/ */
return swap_available() || obj->madv == I915_MADV_DONTNEED; return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
} }
/** /**
...@@ -175,11 +175,11 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -175,11 +175,11 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
list_move_tail(&obj->global_list, &still_in_list); list_move_tail(&obj->global_list, &still_in_list);
if (flags & I915_SHRINK_PURGEABLE && if (flags & I915_SHRINK_PURGEABLE &&
obj->madv != I915_MADV_DONTNEED) obj->mm.madv != I915_MADV_DONTNEED)
continue; continue;
if (flags & I915_SHRINK_VMAPS && if (flags & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mapping)) !is_vmalloc_addr(obj->mm.mapping))
continue; continue;
if (!(flags & I915_SHRINK_ACTIVE) && if (!(flags & I915_SHRINK_ACTIVE) &&
...@@ -194,7 +194,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -194,7 +194,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
/* For the unbound phase, this should be a no-op! */ /* For the unbound phase, this should be a no-op! */
i915_gem_object_unbind(obj); i915_gem_object_unbind(obj);
if (i915_gem_object_put_pages(obj) == 0) if (__i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put(obj); i915_gem_object_put(obj);
......
...@@ -555,16 +555,17 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) ...@@ -555,16 +555,17 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{ {
/* Should only be called during free */ /* Should only be called during free */
sg_free_table(obj->pages); sg_free_table(obj->mm.pages);
kfree(obj->pages); kfree(obj->mm.pages);
} }
static void static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
__i915_gem_object_unpin_pages(obj);
if (obj->stolen) { if (obj->stolen) {
i915_gem_stolen_remove_node(dev_priv, obj->stolen); i915_gem_stolen_remove_node(dev_priv, obj->stolen);
kfree(obj->stolen); kfree(obj->stolen);
...@@ -590,15 +591,16 @@ _i915_gem_object_create_stolen(struct drm_device *dev, ...@@ -590,15 +591,16 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, stolen->size); drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops); i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev, obj->mm.pages = i915_pages_create_for_stolen(dev,
stolen->start, stolen->size); stolen->start,
if (obj->pages == NULL) stolen->size);
if (!obj->mm.pages)
goto cleanup; goto cleanup;
obj->get_page.sg_pos = obj->pages->sgl; obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
obj->get_page.sg_idx = 0; obj->mm.get_page.sg_idx = 0;
i915_gem_object_pin_pages(obj); __i915_gem_object_pin_pages(obj);
obj->stolen = stolen; obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
...@@ -718,14 +720,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -718,14 +720,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
goto err; goto err;
} }
vma->pages = obj->pages; vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND; vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma); __i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
obj->bind_count++; obj->bind_count++;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
i915_gem_object_pin_pages(obj); __i915_gem_object_pin_pages(obj);
return obj; return obj;
......
...@@ -259,13 +259,13 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -259,13 +259,13 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!err) { if (!err) {
struct i915_vma *vma; struct i915_vma *vma;
if (obj->pages && if (obj->mm.pages &&
obj->madv == I915_MADV_WILLNEED && obj->mm.madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (args->tiling_mode == I915_TILING_NONE) if (args->tiling_mode == I915_TILING_NONE)
i915_gem_object_unpin_pages(obj); __i915_gem_object_unpin_pages(obj);
if (!i915_gem_object_is_tiled(obj)) if (!i915_gem_object_is_tiled(obj))
i915_gem_object_pin_pages(obj); __i915_gem_object_pin_pages(obj);
} }
list_for_each_entry(vma, &obj->vma_list, obj_link) { list_for_each_entry(vma, &obj->vma_list, obj_link) {
......
...@@ -73,10 +73,10 @@ static void cancel_userptr(struct work_struct *work) ...@@ -73,10 +73,10 @@ static void cancel_userptr(struct work_struct *work)
/* Cancel any active worker and force us to re-evaluate gup */ /* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL; obj->userptr.work = NULL;
if (obj->pages != NULL) { if (obj->mm.pages) {
/* We are inside a kthread context and can't be interrupted */ /* We are inside a kthread context and can't be interrupted */
WARN_ON(i915_gem_object_unbind(obj)); WARN_ON(i915_gem_object_unbind(obj));
WARN_ON(i915_gem_object_put_pages(obj)); WARN_ON(__i915_gem_object_put_pages(obj));
} }
i915_gem_object_put(obj); i915_gem_object_put(obj);
...@@ -432,15 +432,15 @@ __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj, ...@@ -432,15 +432,15 @@ __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
{ {
int ret; int ret;
ret = st_set_pages(&obj->pages, pvec, num_pages); ret = st_set_pages(&obj->mm.pages, pvec, num_pages);
if (ret) if (ret)
return ret; return ret;
ret = i915_gem_gtt_prepare_object(obj); ret = i915_gem_gtt_prepare_object(obj);
if (ret) { if (ret) {
sg_free_table(obj->pages); sg_free_table(obj->mm.pages);
kfree(obj->pages); kfree(obj->mm.pages);
obj->pages = NULL; obj->mm.pages = NULL;
} }
return ret; return ret;
...@@ -530,8 +530,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ...@@ -530,8 +530,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (ret == 0) { if (ret == 0) {
list_add_tail(&obj->global_list, list_add_tail(&obj->global_list,
&to_i915(dev)->mm.unbound_list); &to_i915(dev)->mm.unbound_list);
obj->get_page.sg_pos = obj->pages->sgl; obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
obj->get_page.sg_idx = 0; obj->mm.get_page.sg_idx = 0;
pinned = 0; pinned = 0;
} }
} }
...@@ -672,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) ...@@ -672,22 +672,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->userptr.work != NULL); BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false); __i915_gem_userptr_set_active(obj, false);
if (obj->madv != I915_MADV_WILLNEED) if (obj->mm.madv != I915_MADV_WILLNEED)
obj->dirty = 0; obj->mm.dirty = false;
i915_gem_gtt_finish_object(obj); i915_gem_gtt_finish_object(obj);
for_each_sgt_page(page, sgt_iter, obj->pages) { for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
if (obj->dirty) if (obj->mm.dirty)
set_page_dirty(page); set_page_dirty(page);
mark_page_accessed(page); mark_page_accessed(page);
put_page(page); put_page(page);
} }
obj->dirty = 0; obj->mm.dirty = false;
sg_free_table(obj->pages); sg_free_table(obj->mm.pages);
kfree(obj->pages); kfree(obj->mm.pages);
} }
static void static void
......
...@@ -896,8 +896,8 @@ static void capture_bo(struct drm_i915_error_buffer *err, ...@@ -896,8 +896,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->write_domain = obj->base.write_domain; err->write_domain = obj->base.write_domain;
err->fence_reg = vma->fence ? vma->fence->id : -1; err->fence_reg = vma->fence ? vma->fence->id : -1;
err->tiling = i915_gem_object_get_tiling(obj); err->tiling = i915_gem_object_get_tiling(obj);
err->dirty = obj->dirty; err->dirty = obj->mm.dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED; err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL; err->userptr = obj->userptr.mm != NULL;
err->cache_level = obj->cache_level; err->cache_level = obj->cache_level;
} }
......
...@@ -744,7 +744,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, ...@@ -744,7 +744,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
i915_ggtt_offset(ce->ring->vma); i915_ggtt_offset(ce->ring->vma);
ce->state->obj->dirty = true; ce->state->obj->mm.dirty = true;
/* Invalidate GuC TLB. */ /* Invalidate GuC TLB. */
if (i915.enable_guc_submission) { if (i915.enable_guc_submission) {
...@@ -2042,7 +2042,7 @@ populate_lr_context(struct i915_gem_context *ctx, ...@@ -2042,7 +2042,7 @@ populate_lr_context(struct i915_gem_context *ctx,
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret; return ret;
} }
ctx_obj->dirty = true; ctx_obj->mm.dirty = true;
/* The second page of the context object contains some fields which must /* The second page of the context object contains some fields which must
* be set up prior to the first execution. */ * be set up prior to the first execution. */
...@@ -2180,7 +2180,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv) ...@@ -2180,7 +2180,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
reg[CTX_RING_HEAD+1] = 0; reg[CTX_RING_HEAD+1] = 0;
reg[CTX_RING_TAIL+1] = 0; reg[CTX_RING_TAIL+1] = 0;
ce->state->obj->dirty = true; ce->state->obj->mm.dirty = true;
i915_gem_object_unpin_map(ce->state->obj); i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = ce->ring->tail = 0; ce->ring->head = ce->ring->tail = 0;
......