Commit 004e390d authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Doug Ledford:
 "Nothing in this is overly interesting, it's mostly your garden variety
  fixes.

  There was some work in this merge cycle around the new ioctl kABI, so
  there are fixes in here related to that (probably with more to come).

  We've also recently added new netlink support with a goal of moving
  the primary means of configuring the entire subsystem to netlink
  (eventually, this is a long term project), so there are fixes for
  that.

  Then a few bnxt_re driver fixes, and a few minor WARN_ON removals, and
  that covers this pull request. There are already a few more fixes on
  the list as of this morning, so there will certainly be more to come
  in this rc cycle ;-)

  Summary:

   - Lots of fixes for the new IOCTL interface and general uverbs flow.
     Found through testing and syzkaller

   - Bugfixes for the new resource tracking netlink reporting

   - Remove some unneeded WARN_ONs that were triggering for some users
     in IPoIB

   - Various fixes for the bnxt_re driver"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (27 commits)
  RDMA/uverbs: Fix kernel panic while using XRC_TGT QP type
  RDMA/bnxt_re: Avoid system hang during device un-reg
  RDMA/bnxt_re: Fix system crash during load/unload
  RDMA/bnxt_re: Synchronize destroy_qp with poll_cq
  RDMA/bnxt_re: Unpin SQ and RQ memory if QP create fails
  RDMA/bnxt_re: Disable atomic capability on bnxt_re adapters
  RDMA/restrack: don't use uaccess_kernel()
  RDMA/verbs: Check existence of function prior to accessing it
  RDMA/vmw_pvrdma: Fix usage of user response structures in ABI file
  RDMA/uverbs: Sanitize user entered port numbers prior to access it
  RDMA/uverbs: Fix circular locking dependency
  RDMA/uverbs: Fix bad unlock balance in ib_uverbs_close_xrcd
  RDMA/restrack: Increment CQ restrack object before committing
  RDMA/uverbs: Protect from command mask overflow
  IB/uverbs: Fix unbalanced unlock on error path for rdma_explicit_destroy
  IB/uverbs: Improve lockdep_check
  RDMA/uverbs: Protect from races between lookup and destroy of uobjects
  IB/uverbs: Hold the uobj write lock after allocate
  IB/uverbs: Fix possible oops with duplicate ioctl attributes
  IB/uverbs: Add ioctl support for 32bit processes
  ...
parents 24180a60 f4576587
@@ -305,16 +305,21 @@ void nldev_exit(void);
 static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
					   struct ib_pd *pd,
					   struct ib_qp_init_attr *attr,
-					   struct ib_udata *udata)
+					   struct ib_udata *udata,
+					   struct ib_uobject *uobj)
 {
	struct ib_qp *qp;
 
+	if (!dev->create_qp)
+		return ERR_PTR(-EOPNOTSUPP);
+
	qp = dev->create_qp(pd, attr, udata);
	if (IS_ERR(qp))
		return qp;
 
	qp->device = dev;
	qp->pd = pd;
+	qp->uobject = uobj;
	/*
	 * We don't track XRC QPs for now, because they don't have PD
	 * and more importantly they are created internaly by driver,
...
@@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
	 */
	uobj->context = context;
	uobj->type = type;
-	atomic_set(&uobj->usecnt, 0);
+	/*
+	 * Allocated objects start out as write locked to deny any other
+	 * syscalls from accessing them until they are committed. See
+	 * rdma_alloc_commit_uobject
+	 */
+	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);
 
	return uobj;
@@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
		goto free;
	}
 
-	uverbs_uobject_get(uobj);
+	/*
+	 * The idr_find is guaranteed to return a pointer to something that
+	 * isn't freed yet, or NULL, as the free after idr_remove goes through
+	 * kfree_rcu(). However the object may still have been released and
+	 * kfree() could be called at any time.
+	 */
+	if (!kref_get_unless_zero(&uobj->ref))
+		uobj = ERR_PTR(-ENOENT);
+
 free:
	rcu_read_unlock();
	return uobj;
@@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
	return ret;
 }
 
-static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
+static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
 {
 #ifdef CONFIG_LOCKDEP
	if (exclusive)
-		WARN_ON(atomic_read(&uobj->usecnt) > 0);
+		WARN_ON(atomic_read(&uobj->usecnt) != -1);
	else
-		WARN_ON(atomic_read(&uobj->usecnt) == -1);
+		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
 #endif
 }
@@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
-	lockdep_check(uobj, true);
+	assert_uverbs_usecnt(uobj, true);
	ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
 
	up_read(&ucontext->cleanup_rwsem);
@@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
-	lockdep_check(uobject, true);
+	assert_uverbs_usecnt(uobject, true);
	ret = uobject->type->type_class->remove_commit(uobject,
						       RDMA_REMOVE_DESTROY);
	if (ret)
-		return ret;
+		goto out;
 
	uobject->type = &null_obj_type;
 
+out:
	up_read(&ucontext->cleanup_rwsem);
-	return 0;
+	return ret;
 }
 
 static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
@@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
		return ret;
	}
 
+	/* matches atomic_set(-1) in alloc_uobj */
+	assert_uverbs_usecnt(uobj, true);
+	atomic_set(&uobj->usecnt, 0);
+
	uobj->type->type_class->alloc_commit(uobj);
	up_read(&uobj->context->cleanup_rwsem);
@@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
 
 void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
 {
-	lockdep_check(uobj, exclusive);
+	assert_uverbs_usecnt(uobj, exclusive);
	uobj->type->type_class->lookup_put(uobj, exclusive);
	/*
	 * In order to unlock an object, either decrease its usecnt for
...
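The alloc_uobj and lookup fixes above lean on a standard kernel idiom: objects freed through kfree_rcu() stay type-stable inside an RCU read-side section, but their refcount may already have hit zero, so a plain kref_get() could revive a dying object and only a zero-checking get is safe. A minimal self-contained sketch of the idiom (hypothetical my_obj/my_idr names, not the uverbs types):

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct kref ref;
	struct rcu_head rcu;
	int id;
};

static DEFINE_IDR(my_idr);

static void my_obj_release(struct kref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	/* Memory stays type-stable until all RCU readers finish. */
	kfree_rcu(obj, rcu);
}

static struct my_obj *my_obj_lookup(int id)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = idr_find(&my_idr, id);
	/*
	 * idr_find() returns NULL or memory that has not been kfree()d
	 * yet, but the object may be mid-teardown: only a zero-check on
	 * the refcount makes taking a new reference safe.
	 */
	if (obj && !kref_get_unless_zero(&obj->ref))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}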
@@ -7,7 +7,6 @@
 #include <rdma/restrack.h>
 #include <linux/mutex.h>
 #include <linux/sched/task.h>
-#include <linux/uaccess.h>
 #include <linux/pid_namespace.h>
 
 void rdma_restrack_init(struct rdma_restrack_root *res)
@@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
 {
	enum rdma_restrack_type type = res->type;
	struct ib_device *dev;
-	struct ib_xrcd *xrcd;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
@@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
		qp = container_of(res, struct ib_qp, res);
		dev = qp->device;
		break;
-	case RDMA_RESTRACK_XRCD:
-		xrcd = container_of(res, struct ib_xrcd, res);
-		dev = xrcd->device;
-		break;
	default:
		WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
		return NULL;
@@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
	return dev;
 }
 
+static bool res_is_user(struct rdma_restrack_entry *res)
+{
+	switch (res->type) {
+	case RDMA_RESTRACK_PD:
+		return container_of(res, struct ib_pd, res)->uobject;
+	case RDMA_RESTRACK_CQ:
+		return container_of(res, struct ib_cq, res)->uobject;
+	case RDMA_RESTRACK_QP:
+		return container_of(res, struct ib_qp, res)->uobject;
+	default:
+		WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
+		return false;
+	}
+}
+
 void rdma_restrack_add(struct rdma_restrack_entry *res)
 {
	struct ib_device *dev = res_to_dev(res);
@@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
	if (!dev)
		return;
 
-	if (!uaccess_kernel()) {
+	if (res_is_user(res)) {
		get_task_struct(current);
		res->task = current;
		res->kern_name = NULL;
...
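uaccess_kernel() reports the calling thread's addressing mode, a property of the caller's context (and constant on most 64-bit architectures), not of the object being tracked; the patch instead derives ownership from the object's own uobject pointer. For user-owned entries the tracker then pins the creating task, while kernel-owned ones carry a name. A condensed sketch of that split (hypothetical my_entry type; KBUILD_MODNAME is supplied by kbuild):

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/types.h>

struct my_entry {
	struct task_struct *task;	/* set for user-created objects */
	const char *kern_name;		/* set for kernel-created objects */
};

static void my_entry_track(struct my_entry *e, bool is_user)
{
	if (is_user) {
		/* Pin the task so its pid/comm stay valid for later dumps. */
		get_task_struct(current);
		e->task = current;
		e->kern_name = NULL;
	} else {
		e->task = NULL;
		e->kern_name = KBUILD_MODNAME;	/* defined by the build system */
	}
}

static void my_entry_untrack(struct my_entry *e)
{
	if (e->task) {
		put_task_struct(e->task);
		e->task = NULL;
	}
}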
@@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
	if (f.file)
		fdput(f);
 
-	mutex_unlock(&file->device->xrcd_tree_mutex);
	uobj_alloc_commit(&obj->uobject);
+	mutex_unlock(&file->device->xrcd_tree_mutex);
+
	return in_len;
 
 err_copy:
@@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
	uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			      file->ucontext);
-	if (IS_ERR(uobj)) {
-		mutex_unlock(&file->device->xrcd_tree_mutex);
+	if (IS_ERR(uobj))
		return PTR_ERR(uobj);
-	}
 
	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
@@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};
 
+	if (!ib_dev->create_cq)
+		return ERR_PTR(-EOPNOTSUPP);
+
	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);
@@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
 
+	cq->res.type = RDMA_RESTRACK_CQ;
+	rdma_restrack_add(&cq->res);
	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;
 
	uobj_alloc_commit(&obj->uobject);
-	cq->res.type = RDMA_RESTRACK_CQ;
-	rdma_restrack_add(&cq->res);
 
	return obj;
 
 err_cb:
@@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file,
	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
-		qp = _ib_create_qp(device, pd, &attr, uhw);
+		qp = _ib_create_qp(device, pd, &attr, uhw,
+				   &obj->uevent.uobject);
 
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
@@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file,
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
+	} else {
+		/* It is done in _ib_create_qp for other QP types */
+		qp->uobject = &obj->uevent.uobject;
	}
-	qp->uobject = &obj->uevent.uobject;
 
	obj->uevent.uobject.object = qp;
@@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file,
		goto release_qp;
	}
 
+	if ((cmd->base.attr_mask & IB_QP_AV) &&
+	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+		ret = -EINVAL;
+		goto release_qp;
+	}
+
	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
-	    !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
+	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
		ret = -EINVAL;
		goto release_qp;
	}
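Both new checks guard values copied straight from the user's command struct before they index per-port state in the device. The shape of the bounds check, as a hedged sketch mirroring what the in-tree rdma_is_port_valid() helper does (the helper and parameter names here are illustrative):

#include <stdbool.h>

/*
 * Sketch: reject a user-supplied port number outside
 * [first_port, first_port + port_cnt) before it is used to index any
 * per-port array. Written to avoid unsigned overflow in the addition.
 */
static bool port_in_range(unsigned int port, unsigned int first_port,
			  unsigned int port_cnt)
{
	return port >= first_port && port - first_port < port_cnt;
}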
@@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
	wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
+
+	if (!pd->device->create_wq) {
+		err = -EOPNOTSUPP;
+		goto err_put_cq;
+	}
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
@@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
+	if (!wq->device->modify_wq) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
+out:
	uobj_put_obj_read(wq);
	return ret;
 }
@@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
 
+	if (!ib_dev->create_rwq_ind_table) {
+		err = -EOPNOTSUPP;
+		goto err_uobj;
+	}
+
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
 
	if (IS_ERR(rwq_ind_tbl)) {
@@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
	struct ib_device_attr attr = {0};
	int err;
 
+	if (!ib_dev->query_device)
+		return -EOPNOTSUPP;
+
	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;
...
@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,
		return 0;
	}
 
+	if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
+		return -EINVAL;
+
	spec = &attr_spec_bucket->attrs[attr_id];
	e = &elements[attr_id];
	e->uattr = uattr_ptr;
...
@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters,
	short min = SHRT_MAX;
	const void *elem;
	int i, j, last_stored = -1;
+	unsigned int equal_min = 0;
 
	for_each_element(elem, i, j, elements, num_elements, num_offset,
			 data_offset) {
@@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters,
		 */
		iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
		last_stored = i;
+		if (min == GET_ID(id))
+			equal_min++;
+		else
+			equal_min = 1;
		min = GET_ID(id);
	}
@@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters,
	 * Therefore, we need to clean the beginning of the array to make sure
	 * all ids of final elements are equal to min.
	 */
-	for (i = num_iters - 1; i >= 0 &&
-	     GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
-		;
-
-	num_iters -= i + 1;
-	memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);
+	memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);
 
	*min_id = min;
-	return num_iters;
+	return equal_min;
 }
 
 #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
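The rewritten tail of get_elements_above_id() counts ties with the running minimum during the forward pass (equal_min), so the final memmove can compact the array directly instead of re-scanning backwards and recomputing num_iters. A standalone model of the bookkeeping, under the assumption (as in the surrounding loop) that elements with an id above the current minimum are skipped:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	unsigned short ids[] = { 7, 3, 5, 3 };
	unsigned short min = 0x7fff;	/* SHRT_MAX */
	unsigned int equal_min = 0;
	size_t i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		if (ids[i] > min)
			continue;	/* larger than current minimum: skip */
		if (ids[i] == min)
			equal_min++;	/* another element tied with min */
		else
			equal_min = 1;	/* strictly smaller: restart the count */
		min = ids[i];
	}
	/* prints: min=3 equal_min=2 */
	printf("min=%u equal_min=%u\n", min, equal_min);
	return 0;
}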
@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
	hash = kzalloc(sizeof(*hash) +
		       ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
			     sizeof(long)) +
-		       BITS_TO_LONGS(attr_max_bucket) * sizeof(long),
+		       BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
		       GFP_KERNEL);
	if (!hash) {
		res = -ENOMEM;
@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
		 * first handler which != NULL. This also defines the
		 * set of flags used for this handler.
		 */
-		for (i = num_object_defs - 1;
+		for (i = num_method_defs - 1;
		     i >= 0 && !method_defs[i]->handler; i--)
			;
		hash->methods[min_id++] = method;
...
@@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
	return -1;
 }
 
+static bool verify_command_idx(u32 command, bool extended)
+{
+	if (extended)
+		return command < ARRAY_SIZE(uverbs_ex_cmd_table);
+
+	return command < ARRAY_SIZE(uverbs_cmd_table);
+}
+
 static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
 {
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	struct ib_uverbs_cmd_hdr hdr;
+	bool extended_command;
	__u32 command;
	__u32 flags;
	int srcu_key;
@@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
	}
 
	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+	flags = (hdr.command &
+		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+	extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED;
+
+	if (!verify_command_idx(command, extended_command)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
	if (verify_command_mask(ib_dev, command)) {
		ret = -EOPNOTSUPP;
		goto out;
@@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
		goto out;
	}
 
-	flags = (hdr.command &
-		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
-
	if (!flags) {
-		if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
-		    !uverbs_cmd_table[command]) {
+		if (!uverbs_cmd_table[command]) {
			ret = -EINVAL;
			goto out;
		}
@@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
		struct ib_udata uhw;
		size_t written_count = count;
 
-		if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
-		    !uverbs_ex_cmd_table[command]) {
+		if (!uverbs_ex_cmd_table[command]) {
			ret = -ENOSYS;
			goto out;
		}
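The table index is now validated once, immediately after the command and flag bits are decoded from hdr.command, rather than separately at each dispatch site. A compact standalone model of that decode-then-verify order (mask values and table sizes here are illustrative, not the uverbs ABI's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define CMD_COMMAND_MASK  0x00ffu	/* illustrative, not the uAPI values */
#define CMD_FLAGS_MASK    0xff00u
#define CMD_FLAGS_SHIFT   8
#define CMD_FLAG_EXTENDED 0x80u

typedef int (*handler_fn)(void);

static handler_fn cmd_table[32];
static handler_fn ex_cmd_table[16];

static bool verify_command_idx(uint32_t command, bool extended)
{
	/* Range-check against whichever table will actually be indexed. */
	if (extended)
		return command < sizeof(ex_cmd_table) / sizeof(ex_cmd_table[0]);
	return command < sizeof(cmd_table) / sizeof(cmd_table[0]);
}

static int dispatch(uint32_t hdr_command)
{
	uint32_t command = hdr_command & CMD_COMMAND_MASK;
	uint32_t flags = (hdr_command & CMD_FLAGS_MASK) >> CMD_FLAGS_SHIFT;
	bool extended = flags & CMD_FLAG_EXTENDED;
	handler_fn fn;

	/* Reject an out-of-range index before either table is touched. */
	if (!verify_command_idx(command, extended))
		return -1;

	fn = extended ? ex_cmd_table[command] : cmd_table[command];
	return fn ? fn() : -1;
}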
@@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = {
	.llseek	 = no_llseek,
 #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
	.unlocked_ioctl = ib_uverbs_ioctl,
+	.compat_ioctl = ib_uverbs_ioctl,
 #endif
 };
@@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = {
	.llseek	 = no_llseek,
 #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
	.unlocked_ioctl = ib_uverbs_ioctl,
+	.compat_ioctl = ib_uverbs_ioctl,
 #endif
 };
...
@@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
		uverbs_attr_get(ctx, UVERBS_UHW_OUT);
 
	if (!IS_ERR(uhw_in)) {
-		udata->inbuf = uhw_in->ptr_attr.ptr;
		udata->inlen = uhw_in->ptr_attr.len;
+		if (uverbs_attr_ptr_is_inline(uhw_in))
+			udata->inbuf = &uhw_in->uattr->data;
+		else
+			udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
	} else {
		udata->inbuf = NULL;
		udata->inlen = 0;
	}
 
	if (!IS_ERR(uhw_out)) {
-		udata->outbuf = uhw_out->ptr_attr.ptr;
+		udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
		udata->outlen = uhw_out->ptr_attr.len;
	} else {
		udata->outbuf = NULL;
@@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);
 
-	ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
+	ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe,
+			     sizeof(cq->cqe));
	if (ret)
		goto err_cq;
@@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;
 
-	return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp);
+	return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));
 }
 
 static DECLARE_UVERBS_METHOD(
...
@@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);
 
-	qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
+	qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
	if (IS_ERR(qp))
		return qp;
@@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
	}
 
	qp->real_qp    = qp;
-	qp->uobject    = NULL;
	qp->qp_type    = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
...
@@ -120,7 +120,6 @@ struct bnxt_re_dev {
 #define BNXT_RE_FLAG_HAVE_L2_REF	3
 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN	4
 #define BNXT_RE_FLAG_QOS_WORK_REG	5
-#define BNXT_RE_FLAG_TASK_IN_PROG	6
 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS	29
	struct net_device		*netdev;
	unsigned int			version, major, minor;
@@ -158,6 +157,7 @@ struct bnxt_re_dev {
	atomic_t			srq_count;
	atomic_t			mr_count;
	atomic_t			mw_count;
+	atomic_t			sched_count;
	/* Max of 2 lossless traffic class supported per port */
	u16				cosq[2];
...
@@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
-	if (dev_attr->is_atomic) {
-		ib_attr->atomic_cap = IB_ATOMIC_HCA;
-		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
-	}
+	ib_attr->atomic_cap = IB_ATOMIC_NONE;
+	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
 
	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
@@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
	return 0;
 }
 
+static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->scq->cq_lock, flags);
+	if (qp->rcq != qp->scq)
+		spin_lock(&qp->rcq->cq_lock);
+	else
+		__acquire(&qp->rcq->cq_lock);
+
+	return flags;
+}
+
+static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+			       unsigned long flags)
+	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
+{
+	if (qp->rcq != qp->scq)
+		spin_unlock(&qp->rcq->cq_lock);
+	else
+		__release(&qp->rcq->cq_lock);
+	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
+}
+
 /* Queue Pairs */
 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 {
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	int rc;
+	unsigned int flags;
 
	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
-	bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
		return rc;
	}
+
+	flags = bnxt_re_lock_cqs(qp);
+	bnxt_qplib_clean_qp(&qp->qplib_qp);
+	bnxt_re_unlock_cqs(qp, flags);
+	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
					   &rdev->sqp_ah->qplib_ah);
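bnxt_re_lock_cqs() has to handle a QP whose send and receive CQ are the same object: taking the one spinlock twice would self-deadlock, so the second lock is taken only when the CQs differ (the __acquire/__release annotations just keep sparse's lock balance checking consistent). The same pattern in a standalone form (hypothetical names):

#include <linux/spinlock.h>

struct my_cq {
	spinlock_t lock;
};

struct my_qp {
	struct my_cq *scq;	/* send CQ */
	struct my_cq *rcq;	/* receive CQ, may alias scq */
};

static unsigned long my_qp_lock_both(struct my_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->lock, flags);
	/* Same CQ for send and receive: one lock already covers both. */
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->lock);
	return flags;
}

static void my_qp_unlock_both(struct my_qp *qp, unsigned long flags)
{
	/* Release in reverse order of acquisition. */
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->lock);
	spin_unlock_irqrestore(&qp->scq->lock, flags);
}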
@@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
			return rc;
		}
 
-		bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+		bnxt_qplib_clean_qp(&qp->qplib_qp);
		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
					   &rdev->qp1_sqp->qplib_qp);
		if (rc) {
@@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
			goto fail;
		}
		qp->qplib_qp.scq = &cq->qplib_cq;
+		qp->scq = cq;
	}
 
	if (qp_init_attr->recv_cq) {
@@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
			goto fail;
		}
		qp->qplib_qp.rcq = &cq->qplib_cq;
+		qp->rcq = cq;
	}
 
	if (qp_init_attr->srq) {
@@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
-			goto fail;
+			goto free_umem;
		}
	}
@@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
	return &qp->ib_qp;
 qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+free_umem:
+	if (udata) {
+		if (qp->rumem)
+			ib_umem_release(qp->rumem);
+		if (qp->sumem)
+			ib_umem_release(qp->sumem);
+	}
 fail:
	kfree(qp);
	return ERR_PTR(rc);
@@ -1603,7 +1641,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p out of flush list\n",
				qp);
-			bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+			bnxt_qplib_clean_qp(&qp->qplib_qp);
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
...
@@ -89,6 +89,8 @@ struct bnxt_re_qp {
	/* QP1 */
	u32				send_psn;
	struct ib_ud_header		qp1_hdr;
+	struct bnxt_re_cq		*scq;
+	struct bnxt_re_cq		*rcq;
 };
 
 struct bnxt_re_cq {
...
@@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
	mutex_unlock(&bnxt_re_dev_lock);
	synchronize_rcu();
-	flush_workqueue(bnxt_re_wq);
 
	ib_dealloc_device(&rdev->ibdev);
	/* rdev is gone */
@@ -1441,7 +1440,7 @@ static void bnxt_re_task(struct work_struct *work)
		break;
	}
	smp_mb__before_atomic();
-	clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+	atomic_dec(&rdev->sched_count);
	kfree(re_work);
 }
@@ -1503,7 +1502,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
		/* netdev notifier will call NETDEV_UNREGISTER again later since
		 * we are still holding the reference to the netdev
		 */
-		if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+		if (atomic_read(&rdev->sched_count) > 0)
			goto exit;
		bnxt_re_ib_unreg(rdev, false);
		bnxt_re_remove_one(rdev);
@@ -1523,7 +1522,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
-			set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+			atomic_inc(&rdev->sched_count);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}
@@ -1578,6 +1577,11 @@ static void __exit bnxt_re_mod_exit(void)
	 */
	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
		dev_info(rdev_to_dev(rdev), "Unregistering Device");
+		/*
+		 * Flush out any scheduled tasks before destroying the
+		 * resources
+		 */
+		flush_workqueue(bnxt_re_wq);
		bnxt_re_dev_stop(rdev);
		bnxt_re_ib_unreg(rdev, true);
		bnxt_re_remove_one(rdev);
...
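A single flag bit can only record "at least one task is scheduled"; the first task to finish clears it, and unregistration can then race ahead of any work still queued. An atomic counter keeps the real in-flight count. A condensed sketch of the scheme (hypothetical names; my_wq is assumed to come from alloc_workqueue() elsewhere):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static atomic_t sched_count = ATOMIC_INIT(0);
static struct workqueue_struct *my_wq;	/* created with alloc_workqueue() */

static void my_task(struct work_struct *work)
{
	/* ... deferred teardown/setup work ... */

	atomic_dec(&sched_count);	/* this task is no longer in flight */
	kfree(work);
}

static int my_schedule(void)
{
	struct work_struct *work = kzalloc(sizeof(*work), GFP_ATOMIC);

	if (!work)
		return -ENOMEM;
	INIT_WORK(work, my_task);
	atomic_inc(&sched_count);	/* count before queueing */
	queue_work(my_wq, work);
	return 0;
}

static bool may_unregister(void)
{
	/* Safe only once every scheduled task has drained. */
	return atomic_read(&sched_count) == 0;
}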
@@ -173,7 +173,7 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
	}
 }
 
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 {
	unsigned long flags;
@@ -1419,7 +1419,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
-	unsigned long flags;
	u16 cmd_flags = 0;
	int rc;
@@ -1437,19 +1436,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
		return rc;
	}
 
-	/* Must walk the associated CQs to nullified the QP ptr */
-	spin_lock_irqsave(&qp->scq->hwq.lock, flags);
-
-	__clean_cq(qp->scq, (u64)(unsigned long)qp);
-
-	if (qp->rcq && qp->rcq != qp->scq) {
-		spin_lock(&qp->rcq->hwq.lock);
-		__clean_cq(qp->rcq, (u64)(unsigned long)qp);
-		spin_unlock(&qp->rcq->hwq.lock);
-	}
-
-	spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
+	return 0;
+}
 
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+			    struct bnxt_qplib_qp *qp)
+{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
	kfree(qp->sq.swq);
@@ -1462,7 +1454,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
 
-	return 0;
 }
 
 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
...
@@ -478,6 +478,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+			    struct bnxt_qplib_qp *qp);
 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge);
 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
@@ -500,7 +503,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
 void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
				 unsigned long *flags);
 void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
...
@@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
 
 /* Device */
 
-static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
-{
-	int rc;
-	u16 pcie_ctl2;
-
-	rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
-				       &pcie_ctl2);
-	if (rc)
-		return false;
-	return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
-}
-
 static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
				     char *fw_ver)
 {
@@ -165,7 +153,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}
 
-	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
+	attr->is_atomic = 0;
 bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
...
@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+	struct pvrdma_create_cq_resp cq_resp = {0};
	struct pvrdma_create_cq ucmd;
 
	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 
	cq->ibcq.cqe = resp->cqe;
	cq->cq_handle = resp->cq_handle;
+	cq_resp.cqn = resp->cq_handle;
	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
@@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
		cq->uar = &(to_vucontext(context)->uar);
 
		/* Copy udata back. */
-		if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back udata\n");
			pvrdma_destroy_cq(&cq->ibcq);
...
@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
	struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+	struct pvrdma_create_srq_resp srq_resp = {0};
	struct pvrdma_create_srq ucmd;
	unsigned long flags;
	int ret;
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
	}
 
	srq->srq_handle = resp->srqn;
+	srq_resp.srqn = resp->srqn;
	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
	/* Copy udata back. */
-	if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+	if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
		dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
		pvrdma_destroy_srq(&srq->ibsrq);
		return ERR_PTR(-EINVAL);
...
@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
	struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+	struct pvrdma_alloc_pd_resp pd_resp = {0};
	int ret;
	void *ptr;
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
	pd->privileged = !context;
	pd->pd_handle = resp->pd_handle;
	pd->pdn = resp->pd_handle;
+	pd_resp.pdn = resp->pd_handle;
 
	if (context) {
-		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+		if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back protection domain\n");
			pvrdma_dealloc_pd(&pd->ibpd);
...
@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)
 {
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
-	WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
-	WARN_ONCE(!priv->path_dentry, "null path debug file\n");
	debugfs_remove(priv->mcg_dentry);
	debugfs_remove(priv->path_dentry);
	priv->mcg_dentry = priv->path_dentry = NULL;
...
@@ -28,10 +28,6 @@ enum rdma_restrack_type {
	 * @RDMA_RESTRACK_QP: Queue pair (QP)
	 */
	RDMA_RESTRACK_QP,
-	/**
-	 * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
-	 */
-	RDMA_RESTRACK_XRCD,
	/**
	 * @RDMA_RESTRACK_MAX: Last entry, used for array dclarations
	 */
...
@@ -276,10 +276,7 @@ struct uverbs_object_tree_def {
  */
 
 struct uverbs_ptr_attr {
-	union {
-		u64		data;
-		void	__user *ptr;
-	};
+	u64		data;
	u16		len;
	/* Combination of bits from enum UVERBS_ATTR_F_XXXX */
	u16		flags;
@@ -351,38 +348,60 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr
 }
 
 static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
-				 size_t idx, const void *from)
+				 size_t idx, const void *from, size_t size)
 {
	const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
	u16 flags;
+	size_t min_size;
 
	if (IS_ERR(attr))
		return PTR_ERR(attr);
 
+	min_size = min_t(size_t, attr->ptr_attr.len, size);
+	if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
+		return -EFAULT;
+
	flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT;
-	return (!copy_to_user(attr->ptr_attr.ptr, from, attr->ptr_attr.len) &&
-		!put_user(flags, &attr->uattr->flags)) ? 0 : -EFAULT;
+	if (put_user(flags, &attr->uattr->flags))
+		return -EFAULT;
+
+	return 0;
 }
 
-static inline int _uverbs_copy_from(void *to, size_t to_size,
+static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
+{
+	return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
+}
+
+static inline int _uverbs_copy_from(void *to,
				    const struct uverbs_attr_bundle *attrs_bundle,
-				    size_t idx)
+				    size_t idx,
+				    size_t size)
 {
	const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
 
	if (IS_ERR(attr))
		return PTR_ERR(attr);
 
-	if (to_size <= sizeof(((struct ib_uverbs_attr *)0)->data))
+	/*
+	 * Validation ensures attr->ptr_attr.len >= size. If the caller is
+	 * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with
+	 * the right size.
+	 */
+	if (unlikely(size < attr->ptr_attr.len))
+		return -EINVAL;
+
+	if (uverbs_attr_ptr_is_inline(attr))
		memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len);
-	else if (copy_from_user(to, attr->ptr_attr.ptr, attr->ptr_attr.len))
+	else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
				attr->ptr_attr.len))
		return -EFAULT;
 
	return 0;
 }
 
 #define uverbs_copy_from(to, attrs_bundle, idx) \
-	_uverbs_copy_from(to, sizeof(*(to)), attrs_bundle, idx)
+	_uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to))
 
 /* =================================================
  * Definitions -> Specs infrastructure
...
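With the union gone, ptr_attr.data always carries the raw 64-bit value from userspace, and the length decides how to interpret it: payloads that fit in the 8 data bytes travel inline in the attribute, larger ones are a user pointer to convert with u64_to_user_ptr(). A condensed model of the decode (hypothetical my_ptr_attr type, not the uverbs struct):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_ptr_attr {
	u64 data;	/* user pointer, or up-to-8-byte inline payload */
	u16 len;
};

static int my_attr_read(void *to, size_t size, const struct my_ptr_attr *attr)
{
	if (size < attr->len)
		return -EINVAL;

	if (attr->len <= sizeof(attr->data)) {
		/* Small payloads are carried inline in the data word. */
		memcpy(to, &attr->data, attr->len);
		return 0;
	}

	/* Larger payloads: data is a userspace address. */
	if (copy_from_user(to, u64_to_user_ptr(attr->data), attr->len))
		return -EFAULT;
	return 0;
}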
@@ -65,7 +65,7 @@ struct ib_uverbs_attr {
	__u16 len;		/* only for pointers */
	__u16 flags;		/* combination of UVERBS_ATTR_F_XXXX */
	__u16 reserved;
-	__u64 data;		/* ptr to command, inline data or idr/fd */
+	__aligned_u64 data;	/* ptr to command, inline data or idr/fd */
 };
 
 struct ib_uverbs_ioctl_hdr {
@@ -73,7 +73,7 @@ struct ib_uverbs_ioctl_hdr {
	__u16 object_id;
	__u16 method_id;
	__u16 num_attrs;
-	__u64 reserved;
+	__aligned_u64 reserved;
	struct ib_uverbs_attr attrs[0];
 };
...
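__aligned_u64 is what makes the .compat_ioctl hookup above safe: plain __u64 is 4-byte aligned in the i386 ABI but 8-byte aligned on x86-64, so a struct mixing smaller fields with a __u64 can change size and field offsets between a 32-bit process and a 64-bit kernel, while __aligned_u64 pins the layout everywhere. A userspace check you can build with both -m32 and -m64 to see the difference (illustrative struct, not the uAPI header):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hdr_plain {
	uint32_t a;
	uint64_t data;	/* alignment 4 on i386, 8 on x86-64 */
};

struct hdr_aligned {
	uint32_t a;
	uint64_t data __attribute__((aligned(8)));	/* 8 everywhere */
};

int main(void)
{
	/* -m32: plain data sits at offset 4 (size 12); aligned stays at
	 * offset 8 (size 16), identical to the -m64 layout. */
	printf("plain:   off=%zu size=%zu\n",
	       offsetof(struct hdr_plain, data), sizeof(struct hdr_plain));
	printf("aligned: off=%zu size=%zu\n",
	       offsetof(struct hdr_aligned, data), sizeof(struct hdr_aligned));
	return 0;
}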