Commit 03ac84f1 authored by Chris Wilson

drm/i915: Pass around sg_table to get_pages/put_pages backend

The plan is to move obj->pages out from under the struct_mutex into its
own per-object lock. We need to prune any assumption of the struct_mutex
from the get_pages/put_pages backends, and to make it easier we pass
around the sg_table to operate on rather than indirectly via the obj.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-13-chris@chris-wilson.co.uk
parent a4f5ea64
...@@ -2185,8 +2185,8 @@ struct drm_i915_gem_object_ops { ...@@ -2185,8 +2185,8 @@ struct drm_i915_gem_object_ops {
* being released or under memory pressure (where we attempt to * being released or under memory pressure (where we attempt to
* reap pages for the shrinker). * reap pages for the shrinker).
*/ */
int (*get_pages)(struct drm_i915_gem_object *); struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *); void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
int (*dmabuf_export)(struct drm_i915_gem_object *); int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *); void (*release)(struct drm_i915_gem_object *);
...@@ -2321,8 +2321,6 @@ struct drm_i915_gem_object { ...@@ -2321,8 +2321,6 @@ struct drm_i915_gem_object {
struct i915_gem_userptr { struct i915_gem_userptr {
uintptr_t ptr; uintptr_t ptr;
unsigned read_only :1; unsigned read_only :1;
unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15
struct i915_mm_struct *mm; struct i915_mm_struct *mm;
struct i915_mmu_object *mmu_object; struct i915_mmu_object *mmu_object;
...@@ -2383,6 +2381,19 @@ i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj) ...@@ -2383,6 +2381,19 @@ i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj)
__deprecated __deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
static inline bool
i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
{
return atomic_read(&obj->base.refcount.refcount) == 0;
}
#if IS_ENABLED(CONFIG_LOCKDEP)
#define lockdep_assert_held_unless(lock, cond) \
GEM_BUG_ON(debug_locks && !lockdep_is_held(lock) && !(cond))
#else
#define lockdep_assert_held_unless(lock, cond)
#endif
static inline bool static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{ {
...@@ -3211,6 +3222,8 @@ dma_addr_t ...@@ -3211,6 +3222,8 @@ dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
unsigned long n); unsigned long n);
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check static inline int __must_check
...@@ -3227,7 +3240,8 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) ...@@ -3227,7 +3240,8 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
static inline void static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{ {
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
i915_gem_object_is_dead(obj));
GEM_BUG_ON(!obj->mm.pages); GEM_BUG_ON(!obj->mm.pages);
obj->mm.pages_pin_count++; obj->mm.pages_pin_count++;
...@@ -3242,7 +3256,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) ...@@ -3242,7 +3256,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
static inline void static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{ {
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
i915_gem_object_is_dead(obj));
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
GEM_BUG_ON(!obj->mm.pages); GEM_BUG_ON(!obj->mm.pages);
...@@ -3255,7 +3270,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) ...@@ -3255,7 +3270,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj); __i915_gem_object_unpin_pages(obj);
} }
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
enum i915_map_type { enum i915_map_type {
I915_MAP_WB = 0, I915_MAP_WB = 0,
...@@ -3480,8 +3496,10 @@ i915_vma_unpin_fence(struct i915_vma *vma) ...@@ -3480,8 +3496,10 @@ i915_vma_unpin_fence(struct i915_vma *vma)
void i915_gem_restore_fences(struct drm_device *dev); void i915_gem_restore_fences(struct drm_device *dev);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
/* i915_gem_context.c */ /* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev); int __must_check i915_gem_context_init(struct drm_device *dev);
......
...@@ -169,7 +169,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, ...@@ -169,7 +169,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0; return 0;
} }
static int static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{ {
struct address_space *mapping = obj->base.filp->f_mapping; struct address_space *mapping = obj->base.filp->f_mapping;
...@@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) ...@@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
int i; int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL; return ERR_PTR(-EINVAL);
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page; struct page *page;
...@@ -187,7 +187,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) ...@@ -187,7 +187,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
page = shmem_read_mapping_page(mapping, i); page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) if (IS_ERR(page))
return PTR_ERR(page); return ERR_CAST(page);
src = kmap_atomic(page); src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE); memcpy(vaddr, src, PAGE_SIZE);
...@@ -202,11 +202,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) ...@@ -202,11 +202,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL); st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL) if (st == NULL)
return -ENOMEM; return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, 1, GFP_KERNEL)) { if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st); kfree(st);
return -ENOMEM; return ERR_PTR(-ENOMEM);
} }
sg = st->sgl; sg = st->sgl;
...@@ -216,28 +216,30 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) ...@@ -216,28 +216,30 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
sg_dma_address(sg) = obj->phys_handle->busaddr; sg_dma_address(sg) = obj->phys_handle->busaddr;
sg_dma_len(sg) = obj->base.size; sg_dma_len(sg) = obj->base.size;
obj->mm.pages = st; return st;
return 0;
} }
static void static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
{ {
int ret;
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
if (obj->mm.madv == I915_MADV_DONTNEED) if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false; obj->mm.dirty = false;
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
i915_gem_clflush_object(obj, false);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
__i915_gem_object_release_shmem(obj);
if (obj->mm.dirty) { if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping; struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr; char *vaddr = obj->phys_handle->vaddr;
...@@ -265,8 +267,8 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) ...@@ -265,8 +267,8 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
obj->mm.dirty = false; obj->mm.dirty = false;
} }
sg_free_table(obj->mm.pages); sg_free_table(pages);
kfree(obj->mm.pages); kfree(pages);
} }
static void static void
...@@ -518,9 +520,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, ...@@ -518,9 +520,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
ret = __i915_gem_object_put_pages(obj); __i915_gem_object_put_pages(obj);
if (ret) if (obj->mm.pages)
return ret; return -EBUSY;
/* create a new object */ /* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
...@@ -536,7 +538,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, ...@@ -536,7 +538,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
static int static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset; void *vaddr = obj->phys_handle->vaddr + args->offset;
...@@ -552,7 +554,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, ...@@ -552,7 +554,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
I915_WAIT_LOCKED | I915_WAIT_LOCKED |
I915_WAIT_ALL, I915_WAIT_ALL,
MAX_SCHEDULE_TIMEOUT, MAX_SCHEDULE_TIMEOUT,
to_rps_client(file_priv)); to_rps_client(file));
if (ret) if (ret)
return ret; return ret;
...@@ -2263,8 +2265,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) ...@@ -2263,8 +2265,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
} }
/* Try to discard unwanted pages */ /* Try to discard unwanted pages */
static void void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{ {
struct address_space *mapping; struct address_space *mapping;
...@@ -2283,32 +2284,20 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj) ...@@ -2283,32 +2284,20 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
} }
static void static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
struct sgt_iter sgt_iter; struct sgt_iter sgt_iter;
struct page *page; struct page *page;
int ret;
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); __i915_gem_object_release_shmem(obj);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
i915_gem_clflush_object(obj, true);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
i915_gem_gtt_finish_object(obj); i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj)) if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj); i915_gem_object_save_bit_17_swizzle(obj, pages);
if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
for_each_sgt_page(page, sgt_iter, obj->mm.pages) { for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty) if (obj->mm.dirty)
set_page_dirty(page); set_page_dirty(page);
...@@ -2319,8 +2308,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2319,8 +2308,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
} }
obj->mm.dirty = false; obj->mm.dirty = false;
sg_free_table(obj->mm.pages); sg_free_table(pages);
kfree(obj->mm.pages); kfree(pages);
} }
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
...@@ -2332,24 +2321,22 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) ...@@ -2332,24 +2321,22 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
radix_tree_delete(&obj->mm.get_page.radix, iter.index); radix_tree_delete(&obj->mm.get_page.radix, iter.index);
} }
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj) void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{ {
const struct drm_i915_gem_object_ops *ops = obj->ops; struct sg_table *pages;
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held(&obj->base.dev->struct_mutex);
if (!obj->mm.pages)
return 0;
if (i915_gem_object_has_pinned_pages(obj)) if (i915_gem_object_has_pinned_pages(obj))
return -EBUSY; return;
GEM_BUG_ON(obj->bind_count); GEM_BUG_ON(obj->bind_count);
/* ->put_pages might need to allocate memory for the bit17 swizzle /* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt * array, hence protect them from being reaped by removing them from gtt
* lists early. */ * lists early. */
list_del(&obj->global_list); pages = fetch_and_zero(&obj->mm.pages);
GEM_BUG_ON(!pages);
if (obj->mm.mapping) { if (obj->mm.mapping) {
void *ptr; void *ptr;
...@@ -2365,12 +2352,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj) ...@@ -2365,12 +2352,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj); __i915_gem_object_reset_page_iter(obj);
ops->put_pages(obj); obj->ops->put_pages(obj, pages);
obj->mm.pages = NULL;
i915_gem_object_invalidate(obj);
return 0;
} }
static unsigned int swiotlb_max_size(void) static unsigned int swiotlb_max_size(void)
...@@ -2382,7 +2364,7 @@ static unsigned int swiotlb_max_size(void) ...@@ -2382,7 +2364,7 @@ static unsigned int swiotlb_max_size(void)
#endif #endif
} }
static int static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
...@@ -2401,8 +2383,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2401,8 +2383,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* wasn't in the GTT, there shouldn't be any way it could have been in * wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache * a GPU cache
*/ */
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
max_segment = swiotlb_max_size(); max_segment = swiotlb_max_size();
if (!max_segment) if (!max_segment)
...@@ -2410,12 +2392,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2410,12 +2392,12 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL); st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL) if (st == NULL)
return -ENOMEM; return ERR_PTR(-ENOMEM);
page_count = obj->base.size / PAGE_SIZE; page_count = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, page_count, GFP_KERNEL)) { if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st); kfree(st);
return -ENOMEM; return ERR_PTR(-ENOMEM);
} }
/* Get the list of pages out of our struct file. They'll be pinned /* Get the list of pages out of our struct file. They'll be pinned
...@@ -2466,20 +2448,19 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2466,20 +2448,19 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
} }
if (sg) /* loop terminated early; short sg table */ if (sg) /* loop terminated early; short sg table */
sg_mark_end(sg); sg_mark_end(sg);
obj->mm.pages = st;
ret = i915_gem_gtt_prepare_object(obj); ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) if (ret)
goto err_pages; goto err_pages;
if (i915_gem_object_needs_bit17_swizzle(obj)) if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj); i915_gem_object_do_bit_17_swizzle(obj, st);
if (i915_gem_object_is_tiled(obj) && if (i915_gem_object_is_tiled(obj) &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
__i915_gem_object_pin_pages(obj); __i915_gem_object_pin_pages(obj);
return 0; return st;
err_pages: err_pages:
sg_mark_end(sg); sg_mark_end(sg);
...@@ -2499,7 +2480,35 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2499,7 +2480,35 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (ret == -ENOSPC) if (ret == -ENOSPC)
ret = -ENOMEM; ret = -ENOMEM;
return ret; return ERR_PTR(ret);
}
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
obj->mm.pages = pages;
}
static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
pages = obj->ops->get_pages(obj);
if (unlikely(IS_ERR(pages)))
return PTR_ERR(pages);
__i915_gem_object_set_pages(obj, pages);
return 0;
} }
/* Ensure that the associated pages are gathered from the backing storage /* Ensure that the associated pages are gathered from the backing storage
...@@ -2511,33 +2520,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) ...@@ -2511,33 +2520,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*/ */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *dev_priv = to_i915(obj->base.dev); int err;
const struct drm_i915_gem_object_ops *ops = obj->ops;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex); lockdep_assert_held(&obj->base.dev->struct_mutex);
if (obj->mm.pages) if (obj->mm.pages)
return 0; return 0;
if (obj->mm.madv != I915_MADV_WILLNEED) { err = ____i915_gem_object_get_pages(obj);
DRM_DEBUG("Attempting to obtain a purgeable object\n"); if (err)
__i915_gem_object_unpin_pages(obj);
return -EFAULT;
}
ret = ops->get_pages(obj);
if (ret) {
__i915_gem_object_unpin_pages(obj); __i915_gem_object_unpin_pages(obj);
return ret;
}
list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list); return err;
obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
obj->mm.get_page.sg_idx = 0;
return 0;
} }
/* The 'mapping' part of i915_gem_object_pin_map() below */ /* The 'mapping' part of i915_gem_object_pin_map() below */
......
...@@ -289,22 +289,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, ...@@ -289,22 +289,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return dma_buf; return dma_buf;
} }
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) static struct sg_table *
i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{ {
struct sg_table *sg; return dma_buf_map_attachment(obj->base.import_attach,
DMA_BIDIRECTIONAL);
sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg))
return PTR_ERR(sg);
obj->mm.pages = sg;
return 0;
} }
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
dma_buf_unmap_attachment(obj->base.import_attach, dma_buf_unmap_attachment(obj->base.import_attach, pages,
obj->mm.pages, DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
} }
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
......
...@@ -644,6 +644,7 @@ i915_gem_swizzle_page(struct page *page) ...@@ -644,6 +644,7 @@ i915_gem_swizzle_page(struct page *page)
/** /**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object * @obj: i915 GEM buffer object
* @pages: the scattergather list of physical pages
* *
* This function fixes up the swizzling in case any page frame number for this * This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with * object has changed in bit 17 since that state has been saved with
...@@ -654,7 +655,8 @@ i915_gem_swizzle_page(struct page *page) ...@@ -654,7 +655,8 @@ i915_gem_swizzle_page(struct page *page)
* by swapping them out and back in again). * by swapping them out and back in again).
*/ */
void void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
struct sgt_iter sgt_iter; struct sgt_iter sgt_iter;
struct page *page; struct page *page;
...@@ -664,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -664,10 +666,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
return; return;
i = 0; i = 0;
for_each_sgt_page(page, sgt_iter, obj->mm.pages) { for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17; char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page); i915_gem_swizzle_page(page);
set_page_dirty(page); set_page_dirty(page);
} }
...@@ -678,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -678,17 +679,19 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
/** /**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object * @obj: i915 GEM buffer object
* @pages: the scattergather list of physical pages
* *
* This function saves the bit 17 of each page frame number so that swizzling * This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned. * be called before the backing storage can be unpinned.
*/ */
void void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter; struct sgt_iter sgt_iter;
struct page *page; struct page *page;
int page_count = obj->base.size >> PAGE_SHIFT;
int i; int i;
if (obj->bit_17 == NULL) { if (obj->bit_17 == NULL) {
...@@ -703,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) ...@@ -703,7 +706,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
i = 0; i = 0;
for_each_sgt_page(page, sgt_iter, obj->mm.pages) { for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17)) if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17); __set_bit(i, obj->bit_17);
else else
......
...@@ -2370,14 +2370,15 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) ...@@ -2370,14 +2370,15 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv); i915_ggtt_flush(dev_priv);
} }
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
if (!dma_map_sg(&obj->base.dev->pdev->dev, if (dma_map_sg(&obj->base.dev->pdev->dev,
obj->mm.pages->sgl, obj->mm.pages->nents, pages->sgl, pages->nents,
PCI_DMA_BIDIRECTIONAL)) PCI_DMA_BIDIRECTIONAL))
return -ENOSPC; return 0;
return 0; return -ENOSPC;
} }
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
...@@ -2696,7 +2697,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma) ...@@ -2696,7 +2697,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
vma->node.start, size); vma->node.start, size);
} }
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct device *kdev = &dev_priv->drm.pdev->dev; struct device *kdev = &dev_priv->drm.pdev->dev;
...@@ -2710,8 +2712,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) ...@@ -2710,8 +2712,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
} }
} }
dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents, dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
PCI_DMA_BIDIRECTIONAL);
} }
static void i915_gtt_color_adjust(struct drm_mm_node *node, static void i915_gtt_color_adjust(struct drm_mm_node *node,
......
...@@ -628,8 +628,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); ...@@ -628,8 +628,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
/* Flags used by pin/bind&friends. */ /* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0) #define PIN_NONBLOCK BIT(0)
......
...@@ -42,7 +42,8 @@ static void internal_free_pages(struct sg_table *st) ...@@ -42,7 +42,8 @@ static void internal_free_pages(struct sg_table *st)
kfree(st); kfree(st);
} }
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned int npages = obj->base.size / PAGE_SIZE; unsigned int npages = obj->base.size / PAGE_SIZE;
...@@ -53,11 +54,11 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) ...@@ -53,11 +54,11 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
st = kmalloc(sizeof(*st), GFP_KERNEL); st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) if (!st)
return -ENOMEM; return ERR_PTR(-ENOMEM);
if (sg_alloc_table(st, npages, GFP_KERNEL)) { if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st); kfree(st);
return -ENOMEM; return ERR_PTR(-ENOMEM);
} }
sg = st->sgl; sg = st->sgl;
...@@ -102,12 +103,9 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) ...@@ -102,12 +103,9 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
sg = __sg_next(sg); sg = __sg_next(sg);
} while (1); } while (1);
obj->mm.pages = st;
if (i915_gem_gtt_prepare_object(obj)) { if (i915_gem_gtt_prepare_pages(obj, st))
obj->mm.pages = NULL;
goto err; goto err;
}
/* Mark the pages as dontneed whilst they are still pinned. As soon /* Mark the pages as dontneed whilst they are still pinned. As soon
* as they are unpinned they are allowed to be reaped by the shrinker, * as they are unpinned they are allowed to be reaped by the shrinker,
...@@ -115,18 +113,19 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) ...@@ -115,18 +113,19 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
* object are only valid whilst active and pinned. * object are only valid whilst active and pinned.
*/ */
obj->mm.madv = I915_MADV_DONTNEED; obj->mm.madv = I915_MADV_DONTNEED;
return 0; return st;
err: err:
sg_mark_end(sg); sg_mark_end(sg);
internal_free_pages(st); internal_free_pages(st);
return -ENOMEM; return ERR_PTR(-ENOMEM);
} }
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
i915_gem_gtt_finish_object(obj); i915_gem_gtt_finish_pages(obj, pages);
internal_free_pages(obj->mm.pages); internal_free_pages(pages);
obj->mm.dirty = false; obj->mm.dirty = false;
obj->mm.madv = I915_MADV_WILLNEED; obj->mm.madv = I915_MADV_WILLNEED;
......
...@@ -91,6 +91,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) ...@@ -91,6 +91,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
} }
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
if (i915_gem_object_unbind(obj) == 0)
__i915_gem_object_put_pages(obj);
return !obj->mm.pages;
}
/** /**
* i915_gem_shrink - Shrink buffer object caches * i915_gem_shrink - Shrink buffer object caches
* @dev_priv: i915 device * @dev_priv: i915 device
...@@ -192,9 +199,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -192,9 +199,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
i915_gem_object_get(obj); i915_gem_object_get(obj);
/* For the unbound phase, this should be a no-op! */ if (unsafe_drop_pages(obj))
i915_gem_object_unbind(obj);
if (__i915_gem_object_put_pages(obj) == 0)
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put(obj); i915_gem_object_put(obj);
......
...@@ -546,17 +546,20 @@ i915_pages_create_for_stolen(struct drm_device *dev, ...@@ -546,17 +546,20 @@ i915_pages_create_for_stolen(struct drm_device *dev,
return st; return st;
} }
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) static struct sg_table *
i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{ {
BUG(); return i915_pages_create_for_stolen(obj->base.dev,
return -EINVAL; obj->stolen->start,
obj->stolen->size);
} }
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
/* Should only be called during free */ /* Should only be called during free */
sg_free_table(obj->mm.pages); sg_free_table(pages);
kfree(obj->mm.pages); kfree(pages);
} }
static void static void
...@@ -591,21 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev, ...@@ -591,21 +594,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base, stolen->size); drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops); i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->mm.pages = i915_pages_create_for_stolen(dev,
stolen->start,
stolen->size);
if (!obj->mm.pages)
goto cleanup;
obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
obj->mm.get_page.sg_idx = 0;
__i915_gem_object_pin_pages(obj);
obj->stolen = stolen; obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE; obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
if (i915_gem_object_pin_pages(obj))
goto cleanup;
return obj; return obj;
cleanup: cleanup:
...@@ -700,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -700,10 +695,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE) if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj; return obj;
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL); vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err; goto err_pages;
} }
/* To simplify the initialisation sequence between KMS and GTT, /* To simplify the initialisation sequence between KMS and GTT,
...@@ -717,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -717,20 +716,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) { if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err; goto err_pages;
} }
vma->pages = obj->mm.pages; vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND; vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma); __i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
obj->bind_count++; obj->bind_count++;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
__i915_gem_object_pin_pages(obj);
return obj; return obj;
err_pages:
i915_gem_object_unpin_pages(obj);
err: err:
i915_gem_object_put(obj); i915_gem_object_put(obj);
return NULL; return NULL;
......
...@@ -73,11 +73,14 @@ static void cancel_userptr(struct work_struct *work) ...@@ -73,11 +73,14 @@ static void cancel_userptr(struct work_struct *work)
/* Cancel any active worker and force us to re-evaluate gup */ /* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL; obj->userptr.work = NULL;
if (obj->mm.pages) { /* We are inside a kthread context and can't be interrupted */
/* We are inside a kthread context and can't be interrupted */ if (i915_gem_object_unbind(obj) == 0)
WARN_ON(i915_gem_object_unbind(obj)); __i915_gem_object_put_pages(obj);
WARN_ON(__i915_gem_object_put_pages(obj)); WARN_ONCE(obj->mm.pages,
} "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
obj->bind_count,
obj->mm.pages_pin_count,
obj->pin_display);
i915_gem_object_put(obj); i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -426,24 +429,25 @@ st_set_pages(struct sg_table **st, struct page **pvec, int num_pages) ...@@ -426,24 +429,25 @@ st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
return ret; return ret;
} }
static int static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj, __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages) struct page **pvec, int num_pages)
{ {
struct sg_table *pages;
int ret; int ret;
ret = st_set_pages(&obj->mm.pages, pvec, num_pages); ret = st_set_pages(&pages, pvec, num_pages);
if (ret) if (ret)
return ret; return ERR_PTR(ret);
ret = i915_gem_gtt_prepare_object(obj); ret = i915_gem_gtt_prepare_pages(obj, pages);
if (ret) { if (ret) {
sg_free_table(obj->mm.pages); sg_free_table(pages);
kfree(obj->mm.pages); kfree(pages);
obj->mm.pages = NULL; return ERR_PTR(ret);
} }
return ret; return pages;
} }
static int static int
...@@ -525,20 +529,20 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ...@@ -525,20 +529,20 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (obj->userptr.work == &work->work) { if (obj->userptr.work == &work->work) {
struct sg_table *pages = ERR_PTR(ret);
if (pinned == npages) { if (pinned == npages) {
ret = __i915_gem_userptr_set_pages(obj, pvec, npages); pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
if (ret == 0) { if (!IS_ERR(pages)) {
list_add_tail(&obj->global_list, __i915_gem_object_set_pages(obj, pages);
&to_i915(dev)->mm.unbound_list);
obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
obj->mm.get_page.sg_idx = 0;
pinned = 0; pinned = 0;
pages = NULL;
} }
} }
obj->userptr.work = ERR_PTR(ret);
obj->userptr.work = ERR_CAST(pages);
} }
obj->userptr.workers--;
i915_gem_object_put(obj); i915_gem_object_put(obj);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -549,7 +553,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ...@@ -549,7 +553,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
kfree(work); kfree(work);
} }
static int static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj, __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
bool *active) bool *active)
{ {
...@@ -574,15 +578,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj, ...@@ -574,15 +578,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
* that error back to this function through * that error back to this function through
* obj->userptr.work = ERR_PTR. * obj->userptr.work = ERR_PTR.
*/ */
if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
return -EAGAIN;
work = kmalloc(sizeof(*work), GFP_KERNEL); work = kmalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) if (work == NULL)
return -ENOMEM; return ERR_PTR(-ENOMEM);
obj->userptr.work = &work->work; obj->userptr.work = &work->work;
obj->userptr.workers++;
work->obj = i915_gem_object_get(obj); work->obj = i915_gem_object_get(obj);
...@@ -593,14 +593,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj, ...@@ -593,14 +593,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
schedule_work(&work->work); schedule_work(&work->work);
*active = true; *active = true;
return -EAGAIN; return ERR_PTR(-EAGAIN);
} }
static int static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{ {
const int num_pages = obj->base.size >> PAGE_SHIFT; const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec; struct page **pvec;
struct sg_table *pages;
int pinned, ret; int pinned, ret;
bool active; bool active;
...@@ -624,15 +625,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) ...@@ -624,15 +625,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
if (obj->userptr.work) { if (obj->userptr.work) {
/* active flag should still be held for the pending work */ /* active flag should still be held for the pending work */
if (IS_ERR(obj->userptr.work)) if (IS_ERR(obj->userptr.work))
return PTR_ERR(obj->userptr.work); return ERR_CAST(obj->userptr.work);
else else
return -EAGAIN; return ERR_PTR(-EAGAIN);
} }
/* Let the mmu-notifier know that we have begun and need cancellation */ /* Let the mmu-notifier know that we have begun and need cancellation */
ret = __i915_gem_userptr_set_active(obj, true); ret = __i915_gem_userptr_set_active(obj, true);
if (ret) if (ret)
return ret; return ERR_PTR(ret);
pvec = NULL; pvec = NULL;
pinned = 0; pinned = 0;
...@@ -641,7 +642,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) ...@@ -641,7 +642,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_TEMPORARY); GFP_TEMPORARY);
if (pvec == NULL) { if (pvec == NULL) {
__i915_gem_userptr_set_active(obj, false); __i915_gem_userptr_set_active(obj, false);
return -ENOMEM; return ERR_PTR(-ENOMEM);
} }
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
...@@ -650,21 +651,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) ...@@ -650,21 +651,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
active = false; active = false;
if (pinned < 0) if (pinned < 0)
ret = pinned, pinned = 0; pages = ERR_PTR(pinned), pinned = 0;
else if (pinned < num_pages) else if (pinned < num_pages)
ret = __i915_gem_userptr_get_pages_schedule(obj, &active); pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
else else
ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
if (ret) { if (IS_ERR(pages)) {
__i915_gem_userptr_set_active(obj, active); __i915_gem_userptr_set_active(obj, active);
release_pages(pvec, pinned, 0); release_pages(pvec, pinned, 0);
} }
drm_free_large(pvec); drm_free_large(pvec);
return ret; return pages;
} }
static void static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{ {
struct sgt_iter sgt_iter; struct sgt_iter sgt_iter;
struct page *page; struct page *page;
...@@ -675,9 +677,9 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) ...@@ -675,9 +677,9 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
if (obj->mm.madv != I915_MADV_WILLNEED) if (obj->mm.madv != I915_MADV_WILLNEED)
obj->mm.dirty = false; obj->mm.dirty = false;
i915_gem_gtt_finish_object(obj); i915_gem_gtt_finish_pages(obj, pages);
for_each_sgt_page(page, sgt_iter, obj->mm.pages) { for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty) if (obj->mm.dirty)
set_page_dirty(page); set_page_dirty(page);
...@@ -686,8 +688,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) ...@@ -686,8 +688,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
} }
obj->mm.dirty = false; obj->mm.dirty = false;
sg_free_table(obj->mm.pages); sg_free_table(pages);
kfree(obj->mm.pages); kfree(pages);
} }
static void static void
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment