Commit b4b9731b authored by Thomas Hellström, committed by Matthew Auld

drm/i915: Simplify userptr locking

Use an rwlock instead of a spinlock for the global notifier lock
to reduce the risk of contention in execbuf.
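
For context, the locking pattern this enables looks roughly like the sketch
below. The function names (submit_check_sketch, invalidate_sketch) and the
lock variable are made up for illustration; only rwlock_t, read_lock()/
write_lock() and mmu_interval_set_seq() are the real kernel primitives
involved. Many execbuf submissions can hold the lock for reading
concurrently, while the invalidation callback takes it for writing only long
enough to publish the new notifier sequence number.

#include <linux/spinlock.h>      /* rwlock_t, read_lock(), write_lock() */
#include <linux/mmu_notifier.h>  /* struct mmu_interval_notifier, mmu_interval_set_seq() */

static DEFINE_RWLOCK(sketch_notifier_lock);

/* Submission side: shared access, so parallel execbufs don't serialize here. */
static void submit_check_sketch(void)
{
        read_lock(&sketch_notifier_lock);
        /* ... re-check userptr notifier sequence numbers for the request ... */
        read_unlock(&sketch_notifier_lock);
}

/* Invalidation side: exclusive, but only to bump the sequence number. */
static bool invalidate_sketch(struct mmu_interval_notifier *mni,
                              unsigned long cur_seq)
{
        write_lock(&sketch_notifier_lock);
        mmu_interval_set_seq(mni, cur_seq);
        write_unlock(&sketch_notifier_lock);
        return true;
}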

Protect object state with the object lock whenever possible, rather
than with the global notifier lock.
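
As an illustration, dropping the userptr page reference now only requires the
object lock to be held (shared), roughly as in this simplified sketch
(drop_ref_sketch is a made-up name; the fields and asserts mirror the
driver's userptr state as changed in the diff below):

static void drop_ref_sketch(struct drm_i915_gem_object *obj)
{
        struct page **pvec = NULL;

        /* Object (dma-resv) lock, not the global notifier lock. */
        assert_object_held_shared(obj);

        if (!--obj->userptr.page_ref) {
                /* Last reference: take ownership of the page array. */
                pvec = obj->userptr.pvec;
                obj->userptr.pvec = NULL;
        }
        GEM_BUG_ON(obj->userptr.page_ref < 0);

        /* ... unpin and free pvec here if it was the last reference ... */
}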

Don't take an explicit page_ref in userptr_submit_init(); instead
call get_pages() after obtaining the page list, so that
get_pages() holds the page_ref. This means we no longer need to call
userptr_submit_fini(), which avoids awkward locking
in our upcoming VM_BIND code.
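
The resulting reference flow in submit_init() is roughly the sketch below
(simplified: the notifier read_begin/retry handling and error paths are
omitted, and submit_init_flow_sketch is a made-up name; the i915 helpers it
calls are the real ones):

static int submit_init_flow_sketch(struct drm_i915_gem_object *obj,
                                   struct page **pvec,
                                   unsigned long notifier_seq)
{
        int ret = i915_gem_object_lock_interruptible(obj, NULL);

        if (ret)
                return ret;

        if (!obj->userptr.page_ref++) {
                /* First user: publish the freshly pinned page array. */
                obj->userptr.pvec = pvec;
                obj->userptr.notifier_seq = notifier_seq;
                /* get_pages() takes and holds its own page_ref internally. */
                ret = ____i915_gem_object_get_pages(obj);
        }
        /* Drop the temporary ref; the pages stay alive via get_pages(). */
        obj->userptr.page_ref--;

        i915_gem_object_unlock(obj);
        return ret;
}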
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210610143525.624677-1-thomas.hellstrom@linux.intel.com
parent 35c6367f
@@ -994,7 +994,7 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
         }
 }
 
-static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release_userptr)
+static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
 {
         const unsigned int count = eb->buffer_count;
         unsigned int i;
@@ -1008,11 +1008,6 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release
 
                 eb_unreserve_vma(ev);
 
-                if (release_userptr && ev->flags & __EXEC_OBJECT_USERPTR_INIT) {
-                        ev->flags &= ~__EXEC_OBJECT_USERPTR_INIT;
-                        i915_gem_object_userptr_submit_fini(vma->obj);
-                }
-
                 if (final)
                         i915_vma_put(vma);
         }
@@ -1990,7 +1985,7 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
         }
 
         /* We may process another execbuffer during the unlock... */
-        eb_release_vmas(eb, false, true);
+        eb_release_vmas(eb, false);
         i915_gem_ww_ctx_fini(&eb->ww);
 
         if (rq) {
@@ -2094,7 +2089,7 @@ static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
 err:
         if (err == -EDEADLK) {
-                eb_release_vmas(eb, false, false);
+                eb_release_vmas(eb, false);
                 err = i915_gem_ww_ctx_backoff(&eb->ww);
                 if (!err)
                         goto repeat_validate;
@@ -2191,7 +2186,7 @@ static int eb_relocate_parse(struct i915_execbuffer *eb)
 err:
         if (err == -EDEADLK) {
-                eb_release_vmas(eb, false, false);
+                eb_release_vmas(eb, false);
                 err = i915_gem_ww_ctx_backoff(&eb->ww);
                 if (!err)
                         goto retry;
@@ -2268,7 +2263,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 #ifdef CONFIG_MMU_NOTIFIER
         if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
-                spin_lock(&eb->i915->mm.notifier_lock);
+                read_lock(&eb->i915->mm.notifier_lock);
 
                 /*
                  * count is always at least 1, otherwise __EXEC_USERPTR_USED
@@ -2286,7 +2281,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
                         break;
                 }
 
-                spin_unlock(&eb->i915->mm.notifier_lock);
+                read_unlock(&eb->i915->mm.notifier_lock);
         }
 #endif
@@ -3435,7 +3430,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
         err = eb_lookup_vmas(&eb);
         if (err) {
-                eb_release_vmas(&eb, true, true);
+                eb_release_vmas(&eb, true);
                 goto err_engine;
         }
 
@@ -3528,7 +3523,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         i915_request_put(eb.request);
 err_vma:
-        eb_release_vmas(&eb, true, true);
+        eb_release_vmas(&eb, true);
         if (eb.trampoline)
                 i915_vma_unpin(eb.trampoline);
         WARN_ON(err == -EDEADLK);
......
@@ -609,14 +609,12 @@ i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
 
 int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
 int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
-void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj);
 int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
 #else
 static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }
 
 static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
 static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
-static inline void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); }
 static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
 #endif
......
@@ -67,11 +67,11 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
         if (!mmu_notifier_range_blockable(range))
                 return false;
 
-        spin_lock(&i915->mm.notifier_lock);
+        write_lock(&i915->mm.notifier_lock);
 
         mmu_interval_set_seq(mni, cur_seq);
 
-        spin_unlock(&i915->mm.notifier_lock);
+        write_unlock(&i915->mm.notifier_lock);
 
         /*
          * We don't wait when the process is exiting. This is valid
@@ -107,16 +107,15 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
 
 static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
 {
-        struct drm_i915_private *i915 = to_i915(obj->base.dev);
         struct page **pvec = NULL;
 
-        spin_lock(&i915->mm.notifier_lock);
+        assert_object_held_shared(obj);
+
         if (!--obj->userptr.page_ref) {
                 pvec = obj->userptr.pvec;
                 obj->userptr.pvec = NULL;
         }
         GEM_BUG_ON(obj->userptr.page_ref < 0);
-        spin_unlock(&i915->mm.notifier_lock);
 
         if (pvec) {
                 const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
@@ -128,7 +127,6 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
 
 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
-        struct drm_i915_private *i915 = to_i915(obj->base.dev);
         const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
         unsigned int max_segment = i915_sg_segment_size();
         struct sg_table *st;
@@ -141,16 +139,13 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
         if (!st)
                 return -ENOMEM;
 
-        spin_lock(&i915->mm.notifier_lock);
-        if (GEM_WARN_ON(!obj->userptr.page_ref)) {
-                spin_unlock(&i915->mm.notifier_lock);
-                ret = -EFAULT;
+        if (!obj->userptr.page_ref) {
+                ret = -EAGAIN;
                 goto err_free;
         }
 
         obj->userptr.page_ref++;
         pvec = obj->userptr.pvec;
-        spin_unlock(&i915->mm.notifier_lock);
 
 alloc_table:
         sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
@@ -241,7 +236,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
                 i915_gem_object_userptr_drop_ref(obj);
 }
 
-static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool get_pages)
+static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
 {
         struct sg_table *pages;
         int err;
@@ -259,15 +254,11 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool
         if (!IS_ERR_OR_NULL(pages))
                 i915_gem_userptr_put_pages(obj, pages);
 
-        if (get_pages)
-                err = ____i915_gem_object_get_pages(obj);
-
         return err;
 }
 
 int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
 {
-        struct drm_i915_private *i915 = to_i915(obj->base.dev);
         const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
         struct page **pvec;
         unsigned int gup_flags = 0;
@@ -277,39 +268,22 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
         if (obj->userptr.notifier.mm != current->mm)
                 return -EFAULT;
 
+        notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
+
         ret = i915_gem_object_lock_interruptible(obj, NULL);
         if (ret)
                 return ret;
 
-        /* optimistically try to preserve current pages while unlocked */
-        if (i915_gem_object_has_pages(obj) &&
-            !mmu_interval_check_retry(&obj->userptr.notifier,
-                                      obj->userptr.notifier_seq)) {
-                spin_lock(&i915->mm.notifier_lock);
-                if (obj->userptr.pvec &&
-                    !mmu_interval_read_retry(&obj->userptr.notifier,
-                                             obj->userptr.notifier_seq)) {
-                        obj->userptr.page_ref++;
-
-                        /* We can keep using the current binding, this is the fastpath */
-                        ret = 1;
-                }
-                spin_unlock(&i915->mm.notifier_lock);
+        if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
+                i915_gem_object_unlock(obj);
+                return 0;
         }
 
-        if (!ret) {
-                /* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
-                ret = i915_gem_object_userptr_unbind(obj, false);
-        }
+        ret = i915_gem_object_userptr_unbind(obj);
         i915_gem_object_unlock(obj);
-        if (ret < 0)
+        if (ret)
                 return ret;
 
-        if (ret > 0)
-                return 0;
-
-        notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
-
         pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
         if (!pvec)
                 return -ENOMEM;
@@ -329,7 +303,9 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
         }
 
         ret = 0;
-        spin_lock(&i915->mm.notifier_lock);
+        ret = i915_gem_object_lock_interruptible(obj, NULL);
+        if (ret)
+                goto out;
 
         if (mmu_interval_read_retry(&obj->userptr.notifier,
                                     !obj->userptr.page_ref ? notifier_seq :
@@ -341,12 +317,14 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
         if (!obj->userptr.page_ref++) {
                 obj->userptr.pvec = pvec;
                 obj->userptr.notifier_seq = notifier_seq;
                 pvec = NULL;
+                ret = ____i915_gem_object_get_pages(obj);
         }
+        obj->userptr.page_ref--;
 
 out_unlock:
-        spin_unlock(&i915->mm.notifier_lock);
+        i915_gem_object_unlock(obj);
 
 out:
         if (pvec) {
@@ -369,11 +347,6 @@ int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
         return 0;
 }
 
-void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj)
-{
-        i915_gem_object_userptr_drop_ref(obj);
-}
-
 int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
 {
         int err;
@@ -396,7 +369,6 @@ int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
                 i915_gem_object_unlock(obj);
         }
 
-        i915_gem_object_userptr_submit_fini(obj);
         return err;
 }
@@ -572,7 +544,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 {
 #ifdef CONFIG_MMU_NOTIFIER
-        spin_lock_init(&dev_priv->mm.notifier_lock);
+        rwlock_init(&dev_priv->mm.notifier_lock);
 #endif
         return 0;
......
@@ -552,7 +552,7 @@ struct i915_gem_mm {
          * notifier_lock for mmu notifiers, memory may not be allocated
          * while holding this lock.
          */
-        spinlock_t notifier_lock;
+        rwlock_t notifier_lock;
 #endif
 
         /* shrinker accounting, also useful for userland debugging */