Commit 30004b86 authored by Matan Barak, committed by Doug Ledford

IB/core: Rename write flag to exclusive in rdma_core

We rename the "write" flag to "exclusive", as it is used for both
WRITE and DESTROY actions.

Fixes: 38321256 ('IB/core: Add support for idr types')
Signed-off-by: Matan Barak <matanb@mellanox.com>
Reviewed-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent b58fc804
...@@ -44,7 +44,7 @@ void uverbs_uobject_get(struct ib_uobject *uobject) ...@@ -44,7 +44,7 @@ void uverbs_uobject_get(struct ib_uobject *uobject)
kref_get(&uobject->ref); kref_get(&uobject->ref);
} }
static void uverbs_uobject_put_ref(struct kref *ref) static void uverbs_uobject_free(struct kref *ref)
{ {
struct ib_uobject *uobj = struct ib_uobject *uobj =
container_of(ref, struct ib_uobject, ref); container_of(ref, struct ib_uobject, ref);
...@@ -57,21 +57,23 @@ static void uverbs_uobject_put_ref(struct kref *ref) ...@@ -57,21 +57,23 @@ static void uverbs_uobject_put_ref(struct kref *ref)
void uverbs_uobject_put(struct ib_uobject *uobject) void uverbs_uobject_put(struct ib_uobject *uobject)
{ {
kref_put(&uobject->ref, uverbs_uobject_put_ref); kref_put(&uobject->ref, uverbs_uobject_free);
} }
static int uverbs_try_lock_object(struct ib_uobject *uobj, bool write) static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
{ {
/* /*
* When a read is required, we use a positive counter. Each read * When a shared access is required, we use a positive counter. Each
* request checks that the value != -1 and increment it. Write * shared access request checks that the value != -1 and increment it.
* requires an exclusive access, thus we check that the counter is * Exclusive access is required for operations like write or destroy.
* zero (nobody claimed this object) and we set it to -1. * In exclusive access mode, we check that the counter is zero (nobody
* Releasing a read lock is done by simply decreasing the counter. * claimed this object) and we set it to -1. Releasing a shared access
* As for writes, since only a single write is permitted, setting * lock is done simply by decreasing the counter. As for exclusive
* it to zero is enough for releasing it. * access locks, since only a single one of them is allowed
* concurrently, setting the counter to zero is enough for releasing
* this lock.
*/ */
if (!write) if (!exclusive)
return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ? return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
-EBUSY : 0; -EBUSY : 0;
...@@ -135,7 +137,7 @@ static void uverbs_idr_remove_uobj(struct ib_uobject *uobj) ...@@ -135,7 +137,7 @@ static void uverbs_idr_remove_uobj(struct ib_uobject *uobj)
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */ /* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type, static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext, struct ib_ucontext *ucontext,
int id, bool write) int id, bool exclusive)
{ {
struct ib_uobject *uobj; struct ib_uobject *uobj;
...@@ -155,14 +157,14 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t ...@@ -155,14 +157,14 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type, static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext, struct ib_ucontext *ucontext,
int id, bool write) int id, bool exclusive)
{ {
struct file *f; struct file *f;
struct ib_uobject *uobject; struct ib_uobject *uobject;
const struct uverbs_obj_fd_type *fd_type = const struct uverbs_obj_fd_type *fd_type =
container_of(type, struct uverbs_obj_fd_type, type); container_of(type, struct uverbs_obj_fd_type, type);
if (write) if (exclusive)
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
f = fget(id); f = fget(id);
...@@ -186,12 +188,12 @@ static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *ty ...@@ -186,12 +188,12 @@ static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *ty
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type, struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext, struct ib_ucontext *ucontext,
int id, bool write) int id, bool exclusive)
{ {
struct ib_uobject *uobj; struct ib_uobject *uobj;
int ret; int ret;
uobj = type->type_class->lookup_get(type, ucontext, id, write); uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
if (IS_ERR(uobj)) if (IS_ERR(uobj))
return uobj; return uobj;
...@@ -200,7 +202,7 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type, ...@@ -200,7 +202,7 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
goto free; goto free;
} }
ret = uverbs_try_lock_object(uobj, write); ret = uverbs_try_lock_object(uobj, exclusive);
if (ret) { if (ret) {
WARN(ucontext->cleanup_reason, WARN(ucontext->cleanup_reason,
"ib_uverbs: Trying to lookup_get while cleanup context\n"); "ib_uverbs: Trying to lookup_get while cleanup context\n");
...@@ -209,7 +211,7 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type, ...@@ -209,7 +211,7 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
return uobj; return uobj;
free: free:
uobj->type->type_class->lookup_put(uobj, write); uobj->type->type_class->lookup_put(uobj, exclusive);
uverbs_uobject_put(uobj); uverbs_uobject_put(uobj);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
...@@ -350,10 +352,10 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj, ...@@ -350,10 +352,10 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
return ret; return ret;
} }
static void lockdep_check(struct ib_uobject *uobj, bool write) static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
{ {
#ifdef CONFIG_LOCKDEP #ifdef CONFIG_LOCKDEP
if (write) if (exclusive)
WARN_ON(atomic_read(&uobj->usecnt) > 0); WARN_ON(atomic_read(&uobj->usecnt) > 0);
else else
WARN_ON(atomic_read(&uobj->usecnt) == -1); WARN_ON(atomic_read(&uobj->usecnt) == -1);
...@@ -465,29 +467,29 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj) ...@@ -465,29 +467,29 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
uobj->type->type_class->alloc_abort(uobj); uobj->type->type_class->alloc_abort(uobj);
} }
static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool write) static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
{ {
} }
static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool write) static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
{ {
struct file *filp = uobj->object; struct file *filp = uobj->object;
WARN_ON(write); WARN_ON(exclusive);
/* This indirectly calls uverbs_close_fd and free the object */ /* This indirectly calls uverbs_close_fd and free the object */
fput(filp); fput(filp);
} }
void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool write) void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{ {
lockdep_check(uobj, write); lockdep_check(uobj, exclusive);
uobj->type->type_class->lookup_put(uobj, write); uobj->type->type_class->lookup_put(uobj, exclusive);
/* /*
* In order to unlock an object, either decrease its usecnt for * In order to unlock an object, either decrease its usecnt for
* read access or zero it in case of write access. See * read access or zero it in case of exclusive access. See
* uverbs_try_lock_object for locking schema information. * uverbs_try_lock_object for locking schema information.
*/ */
if (!write) if (!exclusive)
atomic_dec(&uobj->usecnt); atomic_dec(&uobj->usecnt);
else else
atomic_set(&uobj->usecnt, 0); atomic_set(&uobj->usecnt, 0);
...@@ -512,7 +514,7 @@ const struct uverbs_obj_type_class uverbs_idr_class = { ...@@ -512,7 +514,7 @@ const struct uverbs_obj_type_class uverbs_idr_class = {
* When the other thread continue - without the RCU, it would * When the other thread continue - without the RCU, it would
* access freed memory. However, the rcu_read_lock delays the free * access freed memory. However, the rcu_read_lock delays the free
* until the rcu_read_lock of the READ operation quits. Since the * until the rcu_read_lock of the READ operation quits. Since the
* write lock of the object is still taken by the DESTROY flow, the * exclusive lock of the object is still taken by the DESTROY flow, the
* READ operation will get -EBUSY and it'll just bail out. * READ operation will get -EBUSY and it'll just bail out.
*/ */
.needs_kfree_rcu = true, .needs_kfree_rcu = true,
......
...@@ -54,17 +54,18 @@ struct uverbs_obj_type_class { ...@@ -54,17 +54,18 @@ struct uverbs_obj_type_class {
* destroyed. * destroyed.
* [lookup]: Starts with lookup_get which fetches and locks the * [lookup]: Starts with lookup_get which fetches and locks the
* object. After the handler finished using the object, it * object. After the handler finished using the object, it
* needs to call lookup_put to unlock it. The write flag * needs to call lookup_put to unlock it. The exclusive
* indicates if the object is locked for exclusive access. * flag indicates if the object is locked for exclusive
* [remove]: Starts with lookup_get with write flag set. This locks * access.
* the object for exclusive access. If the handler code * [remove]: Starts with lookup_get with exclusive flag set. This
* completed successfully, remove_commit is called and * locks the object for exclusive access. If the handler
* the ib_uobject is removed from the context's uobjects * code completed successfully, remove_commit is called
* repository and put. The object itself is destroyed as * and the ib_uobject is removed from the context's
* well. Once remove succeeds new krefs to the object * uobjects repository and put. The object itself is
* cannot be acquired by other threads or userspace and * destroyed as well. Once remove succeeds new krefs to
* the hardware driver is removed from the object. * the object cannot be acquired by other threads or
* Other krefs on the object may still exist. * userspace and the hardware driver is removed from the
* object. Other krefs on the object may still exist.
* If the handler code failed, lookup_put should be * If the handler code failed, lookup_put should be
* called. This callback is used when the context * called. This callback is used when the context
* is destroyed as well (process termination, * is destroyed as well (process termination,
...@@ -77,10 +78,10 @@ struct uverbs_obj_type_class { ...@@ -77,10 +78,10 @@ struct uverbs_obj_type_class {
struct ib_uobject *(*lookup_get)(const struct uverbs_obj_type *type, struct ib_uobject *(*lookup_get)(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext, int id, struct ib_ucontext *ucontext, int id,
bool write); bool exclusive);
void (*lookup_put)(struct ib_uobject *uobj, bool write); void (*lookup_put)(struct ib_uobject *uobj, bool exclusive);
/* /*
* Must be called with the write lock held. If successful uobj is * Must be called with the exclusive lock held. If successful uobj is
* invalid on return. On failure uobject is left completely * invalid on return. On failure uobject is left completely
* unchanged * unchanged
*/ */
...@@ -121,8 +122,8 @@ struct uverbs_obj_idr_type { ...@@ -121,8 +122,8 @@ struct uverbs_obj_idr_type {
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type, struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext, struct ib_ucontext *ucontext,
int id, bool write); int id, bool exclusive);
void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool write); void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive);
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type, struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext); struct ib_ucontext *ucontext);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj); void rdma_alloc_abort_uobject(struct ib_uobject *uobj);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment