Commit 37e680a1 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Introduce drm_i915_gem_object_ops

In order to specialise functions depending upon the type of object, we
can attach vfuncs to each object via a new ->ops pointer.

For instance, this will be used in future patches to only bind pages from
a dma-buf for the duration that the object is used by the GPU - and so
avoid pinning those pages for the entire lifetime of the object.
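As an aside, a minimal stand-alone sketch of the ops-vtable pattern this patch introduces; this is a user-space mock, none of these names are the kernel's:

	#include <stdio.h>

	struct obj;

	/* Mirrors the shape of drm_i915_gem_object_ops. */
	struct obj_ops {
		int  (*get_pages)(struct obj *);
		void (*put_pages)(struct obj *);
	};

	struct obj {
		const struct obj_ops *ops;
		int pages;	/* stand-in for obj->pages */
	};

	static int shmem_get_pages(struct obj *o)
	{
		puts("shmem: gather pages");
		o->pages = 1;
		return 0;
	}

	static void shmem_put_pages(struct obj *o)
	{
		puts("shmem: release pages");
		o->pages = 0;
	}

	static const struct obj_ops shmem_ops = {
		.get_pages = shmem_get_pages,
		.put_pages = shmem_put_pages,
	};

	/* Idempotent wrapper over the vfunc, like i915_gem_object_get_pages(). */
	static int obj_get_pages(struct obj *o)
	{
		if (o->pages)
			return 0;
		return o->ops->get_pages(o);
	}

	int main(void)
	{
		struct obj o = { .ops = &shmem_ops, .pages = 0 };

		obj_get_pages(&o);	/* dispatches to shmem_get_pages() */
		o.ops->put_pages(&o);
		return 0;
	}

A second object type only needs to supply a different ops table; callers stay unchanged.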

v2: Bonus comments.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 8c0bd3c0
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -899,9 +899,29 @@ enum i915_cache_level {
 	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
+struct drm_i915_gem_object_ops {
+	/* Interface between the GEM object and its backing storage.
+	 * get_pages() is called once prior to binding the associated set
+	 * of pages into the GTT, and put_pages() is called after we no
+	 * longer need them. As we expect there to be an associated cost
+	 * with migrating pages between the backing storage and making
+	 * them available for the GPU (e.g. clflush), we may hold onto
+	 * the pages after they are no longer referenced by the GPU in
+	 * case they may be used again shortly (for example migrating the
+	 * pages to a different memory domain within the GTT). put_pages()
+	 * will therefore most likely be called when the object itself is
+	 * being released or under memory pressure (where we attempt to
+	 * reap pages for the shrinker).
+	 */
+	int (*get_pages)(struct drm_i915_gem_object *);
+	void (*put_pages)(struct drm_i915_gem_object *);
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
+	const struct drm_i915_gem_object_ops *ops;
+
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
 	struct list_head gtt_list;
@@ -1306,7 +1326,8 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_object_init(struct drm_i915_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			  const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1319,7 +1340,7 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
-int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1650,18 +1650,12 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 	return obj->madv == I915_MADV_DONTNEED;
 }
 
-static int
+static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	int page_count = obj->base.size / PAGE_SIZE;
 	int ret, i;
 
-	BUG_ON(obj->gtt_space);
-
-	if (obj->pages == NULL)
-		return 0;
-
-	BUG_ON(obj->gtt_space);
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -1693,9 +1687,21 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	obj->dirty = 0;
 
 	drm_free_large(obj->pages);
 	obj->pages = NULL;
+}
 
-	list_del(&obj->gtt_list);
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+	if (obj->sg_table || obj->pages == NULL)
+		return 0;
+
+	BUG_ON(obj->gtt_space);
+
+	ops->put_pages(obj);
+	list_del(&obj->gtt_list);
 
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
@@ -1712,7 +1718,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 				 &dev_priv->mm.unbound_list,
 				 gtt_list) {
 		if (i915_gem_object_is_purgeable(obj) &&
-		    i915_gem_object_put_pages_gtt(obj) == 0) {
+		    i915_gem_object_put_pages(obj) == 0) {
 			count += obj->base.size >> PAGE_SHIFT;
 			if (count >= target)
 				return count;
@@ -1724,7 +1730,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 				 mm_list) {
 		if (i915_gem_object_is_purgeable(obj) &&
 		    i915_gem_object_unbind(obj) == 0 &&
-		    i915_gem_object_put_pages_gtt(obj) == 0) {
+		    i915_gem_object_put_pages(obj) == 0) {
 			count += obj->base.size >> PAGE_SHIFT;
 			if (count >= target)
 				return count;
@@ -1742,10 +1748,10 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 	i915_gem_evict_everything(dev_priv->dev);
 
 	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
-		i915_gem_object_put_pages_gtt(obj);
+		i915_gem_object_put_pages(obj);
 }
 
-int
+static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -1754,9 +1760,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct page *page;
 	gfp_t gfp;
 
-	if (obj->pages || obj->sg_table)
-		return 0;
-
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
 	 * a GPU cache
@@ -1806,7 +1809,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
-	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
 	return 0;
 
 err_pages:
@@ -1818,6 +1820,31 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	return PTR_ERR(page);
 }
 
+/* Ensure that the associated pages are gathered from the backing storage
+ * and pinned into our object. i915_gem_object_get_pages() may be called
+ * multiple times before the pages are released by a single call to
+ * i915_gem_object_put_pages() - once the pages are no longer referenced
+ * either as a result of memory pressure (reaping pages under the shrinker)
+ * or as the object is itself released.
+ */
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	const struct drm_i915_gem_object_ops *ops = obj->ops;
+	int ret;
+
+	if (obj->sg_table || obj->pages)
+		return 0;
+
+	ret = ops->get_pages(obj);
+	if (ret)
+		return ret;
+
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+	return 0;
+}
+
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *ring,
@@ -2071,7 +2098,6 @@ void i915_gem_reset(struct drm_device *dev)
 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	}
 
-
 	/* The fence registers are invalidated so clear them out */
 	i915_gem_reset_fences(dev);
 }
@@ -2871,7 +2897,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return -E2BIG;
 	}
 
-	ret = i915_gem_object_get_pages_gtt(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret)
 		return ret;
 
@@ -3610,15 +3636,16 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	return ret;
 }
 
-void i915_gem_object_init(struct drm_i915_gem_object *obj)
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			  const struct drm_i915_gem_object_ops *ops)
 {
-	obj->base.driver_private = NULL;
-
 	INIT_LIST_HEAD(&obj->mm_list);
 	INIT_LIST_HEAD(&obj->gtt_list);
 	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->exec_list);
 
+	obj->ops = ops;
+
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	obj->madv = I915_MADV_WILLNEED;
 	/* Avoid an unnecessary call to unbind on the first bind. */
@@ -3627,6 +3654,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj)
 	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
 
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.get_pages = i915_gem_object_get_pages_gtt,
+	.put_pages = i915_gem_object_put_pages_gtt,
+};
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size)
 {
@@ -3653,7 +3685,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	mapping_set_gfp_mask(mapping, mask);
 
-	i915_gem_object_init(obj);
+	i915_gem_object_init(obj, &i915_gem_object_ops);
 
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3711,7 +3743,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		dev_priv->mm.interruptible = was_interruptible;
 	}
 
-	i915_gem_object_put_pages_gtt(obj);
+	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
 
 	drm_gem_object_release(&obj->base);
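To see where this is headed, a hypothetical second backend would only need its own ops table; the dmabuf names below are invented for illustration and are not part of this patch (the real dma-buf conversion lands in a later patch):

	/* Hypothetical: a dma-buf backed object with its own
	 * backing-storage ops. Both callbacks are invented names. */
	static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj);
	static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj);

	static const struct drm_i915_gem_object_ops i915_gem_dmabuf_ops = {
		.get_pages = i915_gem_object_get_pages_dmabuf,
		.put_pages = i915_gem_object_put_pages_dmabuf,
	};

	/* At import time the object would then be initialised with:
	 *	i915_gem_object_init(obj, &i915_gem_dmabuf_ops);
	 */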
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -41,7 +41,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	if (ret)
 		return ERR_PTR(ret);
 
-	ret = i915_gem_object_get_pages_gtt(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret) {
 		sg = ERR_PTR(ret);
 		goto out;
@@ -89,7 +89,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 		goto out_unlock;
 	}
 
-	ret = i915_gem_object_get_pages_gtt(obj);
+	ret = i915_gem_object_get_pages(obj);
 	if (ret) {
 		mutex_unlock(&dev->struct_mutex);
 		return ERR_PTR(ret);