Commit 87d93e9a authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Several regression fixes from work that landed in the merge window,
  particularly in the mlx5 driver:

   - Various static checker and warning fixes

   - General bug fixes in rvt, qedr, hns, mlx5 and hfi1

   - Several regression fixes related to the ECE and QP changes in the
     last cycle

   - Fixes for a few long-standing crashers in CMA, uverbs ioctl, and
     xrc"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (25 commits)
  IB/hfi1: Add atomic triggered sleep/wakeup
  IB/hfi1: Correct -EBUSY handling in tx code
  IB/hfi1: Fix module use count flaw due to leftover module put calls
  IB/hfi1: Restore kfree in dummy_netdev cleanup
  IB/mad: Fix use after free when destroying MAD agent
  RDMA/mlx5: Protect from kernel crash if XRC_TGT doesn't have udata
  RDMA/counter: Query a counter before release
  RDMA/mad: Fix possible memory leak in ib_mad_post_receive_mads()
  RDMA/mlx5: Fix integrity enabled QP creation
  RDMA/mlx5: Remove ECE limitation from the RAW_PACKET QPs
  RDMA/mlx5: Fix remote gid value in query QP
  RDMA/mlx5: Don't access ib_qp fields in internal destroy QP path
  RDMA/core: Check that type_attrs is not NULL prior access
  RDMA/hns: Fix an cmd queue issue when resetting
  RDMA/hns: Fix a calltrace when registering MR from userspace
  RDMA/mlx5: Add missed RST2INIT and INIT2INIT steps during ECE handshake
  RDMA/cma: Protect bind_list and listen_list while finding matching cm id
  RDMA/qedr: Fix KASAN: use-after-free in ucma_event_handler+0x532
  RDMA/efa: Set maximum pkeys device attribute
  RDMA/rvt: Fix potential memory leak caused by rvt_alloc_rq
  ...
parents 908f7d12 38fd98af
@@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work)
 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
                                  struct cm_work *work)
+        __releases(&cm_id_priv->lock)
 {
         bool immediate;
...
@@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener(
 {
         struct rdma_id_private *id_priv, *id_priv_dev;

+        lockdep_assert_held(&lock);
+
         if (!bind_list)
                 return ERR_PTR(-EINVAL);
@@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
                 }
         }

+        mutex_lock(&lock);
         /*
          * Net namespace might be getting deleted while route lookup,
          * cm_id lookup is in progress. Therefore, perform netdevice
@@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
         id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
 err:
         rcu_read_unlock();
+        mutex_unlock(&lock);
         if (IS_ERR(id_priv) && *net_dev) {
                 dev_put(*net_dev);
                 *net_dev = NULL;
@@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
         struct net *net = id_priv->id.route.addr.dev_addr.net;
         int ret;

+        lockdep_assert_held(&lock);
+
         if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                 return;
@@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
         u64 sid, mask;
         __be16 port;

+        lockdep_assert_held(&lock);
+
         addr = cma_src_addr(id_priv);
         port = htons(bind_list->port);
@@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
         struct rdma_bind_list *bind_list;
         int ret;

+        lockdep_assert_held(&lock);
+
         bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
         if (!bind_list)
                 return -ENOMEM;
@@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
         struct sockaddr *saddr = cma_src_addr(id_priv);
         __be16 dport = cma_port(daddr);

+        lockdep_assert_held(&lock);
+
         hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                 struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
                 struct sockaddr *cur_saddr = cma_src_addr(cur_id);
@@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
         unsigned int rover;
         struct net *net = id_priv->id.route.addr.dev_addr.net;

+        lockdep_assert_held(&lock);
+
         inet_get_local_port_range(net, &low, &high);
         remaining = (high - low) + 1;
         rover = prandom_u32() % remaining + low;
@@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
         struct rdma_id_private *cur_id;
         struct sockaddr *addr, *cur_addr;

+        lockdep_assert_held(&lock);
+
         addr = cma_src_addr(id_priv);
         hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                 if (id_priv == cur_id)
@@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
         unsigned short snum;
         int ret;

+        lockdep_assert_held(&lock);
+
         snum = ntohs(cma_port(cma_src_addr(id_priv)));
         if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                 return -EACCES;
...
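The cma.c hunks above all apply one pattern: every helper that walks or mutates the shared bind/listen state now documents its locking contract with lockdep_assert_held(&lock), and the lookup path in cma_ib_id_from_event() now actually takes the mutex around cma_find_listener(). As a rough userspace sketch of the same idiom (the kernel macro is free unless CONFIG_LOCKDEP is enabled; the helper names below are illustrative, not kernel APIs):

/* Userspace analog of lockdep_assert_held(): an errorcheck mutex whose
 * trylock reports EBUSY when the lock is already held, so forgetting to
 * take the lock trips the assert in debug runs. */
#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lock;

static void assert_lock_held(void)
{
        /* trylock never blocks; EBUSY means "already locked" */
        assert(pthread_mutex_trylock(&lock) == EBUSY);
}

static void bind_port_sketch(void)
{
        assert_lock_held();     /* catches callers that forgot the mutex */
        /* ... mutate the shared bind list here ... */
}

int main(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&lock, &attr);

        pthread_mutex_lock(&lock);
        bind_port_sketch();     /* ok: lock held */
        pthread_mutex_unlock(&lock);
        return 0;
}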
@@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
         return ret;
 }

-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
 {
         struct ib_device *dev = counter->device;
         struct rdma_port_counter *port_counter;
@@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter)
         if (!port_counter->hstats)
                 return;

+        rdma_counter_query_stats(counter);
+
         for (i = 0; i < counter->stats->num_counters; i++)
                 port_counter->hstats->value[i] += counter->stats->value[i];
 }
...
@@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
         xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);

         flush_workqueue(port_priv->wq);
-        ib_cancel_rmpp_recvs(mad_agent_priv);

         deref_mad_agent(mad_agent_priv);
         wait_for_completion(&mad_agent_priv->comp);
+        ib_cancel_rmpp_recvs(mad_agent_priv);

         ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
@@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                                  DMA_FROM_DEVICE);
                 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                   sg_list.addr))) {
+                        kfree(mad_priv);
                         ret = -ENOMEM;
                         break;
                 }
...
@@ -470,40 +470,46 @@ static struct ib_uobject *
 alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
                        struct uverbs_attr_bundle *attrs)
 {
-        const struct uverbs_obj_fd_type *fd_type =
-                container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+        const struct uverbs_obj_fd_type *fd_type;
         int new_fd;
-        struct ib_uobject *uobj;
+        struct ib_uobject *uobj, *ret;
         struct file *filp;

+        uobj = alloc_uobj(attrs, obj);
+        if (IS_ERR(uobj))
+                return uobj;
+
+        fd_type =
+                container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
         if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
-                    fd_type->fops->release != &uverbs_async_event_release))
-                return ERR_PTR(-EINVAL);
+                    fd_type->fops->release != &uverbs_async_event_release)) {
+                ret = ERR_PTR(-EINVAL);
+                goto err_fd;
+        }

         new_fd = get_unused_fd_flags(O_CLOEXEC);
-        if (new_fd < 0)
-                return ERR_PTR(new_fd);
-
-        uobj = alloc_uobj(attrs, obj);
-        if (IS_ERR(uobj))
+        if (new_fd < 0) {
+                ret = ERR_PTR(new_fd);
                 goto err_fd;
+        }

         /* Note that uverbs_uobject_fd_release() is called during abort */
         filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
                                   fd_type->flags);
         if (IS_ERR(filp)) {
-                uverbs_uobject_put(uobj);
-                uobj = ERR_CAST(filp);
-                goto err_fd;
+                ret = ERR_CAST(filp);
+                goto err_getfile;
         }
         uobj->object = filp;

         uobj->id = new_fd;
         return uobj;

-err_fd:
+err_getfile:
         put_unused_fd(new_fd);
-        return uobj;
+err_fd:
+        uverbs_uobject_put(uobj);
+        return ret;
 }

 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
...
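The alloc_begin_fd_uobject() rewrite is a staged goto-unwind reordering: resources are now acquired in one fixed order, and each error label releases exactly what was acquired before the failure by falling through to earlier labels (err_getfile puts the fd, then err_fd drops the uobject reference). A minimal standalone sketch of that idiom, with invented stage names rather than kernel APIs:

/* Staged unwind: acquire in order, release in reverse via fall-through
 * labels. fail_at selects which stage fails, for demonstration. */
#include <stdio.h>
#include <stdlib.h>

struct uobj { int refs; };

static struct uobj *alloc_begin_sketch(int fail_at)
{
        struct uobj *u;
        int fd;
        void *filp;

        u = calloc(1, sizeof(*u));              /* stage 1: object */
        if (!u)
                return NULL;

        fd = (fail_at == 2) ? -1 : 3;           /* stage 2: fd */
        if (fd < 0)
                goto err_obj;

        filp = (fail_at == 3) ? NULL : u;       /* stage 3: file */
        if (!filp)
                goto err_fd;

        return u;                               /* success: all three live */

err_fd:
        printf("put_unused_fd(%d)\n", fd);      /* undo stage 2 */
err_obj:
        free(u);                                /* undo stage 1 */
        return NULL;
}

int main(void)
{
        alloc_begin_sketch(3);  /* fails at stage 3: puts fd, frees object */
        return 0;
}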
@@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev,
         props->max_send_sge = dev_attr->max_sq_sge;
         props->max_recv_sge = dev_attr->max_rq_sge;
         props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+        props->max_pkeys = 1;

         if (udata && udata->outlen) {
                 resp.max_sq_sge = dev_attr->max_sq_sge;
...
@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
 static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
         struct hfi1_pportdata *ppd;
-        int ret;

         ppd = private2ppd(fp);

-        ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-        if (ret) /* failed - release the module */
-                module_put(THIS_MODULE);
-
-        return ret;
+        return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }

 static int i2c1_debugfs_open(struct inode *in, struct file *fp)
@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
         ppd = private2ppd(fp);

         release_chip_resource(ppd->dd, i2c_target(target));
-        module_put(THIS_MODULE);

         return 0;
 }
@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
 static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
         struct hfi1_pportdata *ppd;
-        int ret;
-
-        if (!try_module_get(THIS_MODULE))
-                return -ENODEV;

         ppd = private2ppd(fp);

-        ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-        if (ret) /* failed - release the module */
-                module_put(THIS_MODULE);
-
-        return ret;
+        return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }

 static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
         ppd = private2ppd(fp);

         release_chip_resource(ppd->dd, i2c_target(target));
-        module_put(THIS_MODULE);

         return 0;
 }
...
@@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w)
  * @wait_head: the wait queue
  *
  * This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (eg, sdma descriptor or pio
  * buffer) is run out.
  */
 static inline void iowait_queue(bool pkts_sent, struct iowait *w,
...
@@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf {
  * @sde: sdma engine
  * @tx_list: tx request list
  * @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
  * @flow: tracks when list needs to be flushed for a flow change
  * @q_idx: ipoib Tx queue index
  * @pkts_sent: indicator packets have been sent from this queue
@@ -80,6 +83,9 @@ struct hfi1_ipoib_txq {
         struct sdma_engine *sde;
         struct list_head tx_list;
         u64 sent_txreqs;
+        atomic_t stops;
+        atomic_t ring_full;
+        atomic_t no_desc;
         union hfi1_ipoib_flow flow;
         u8 q_idx;
         bool pkts_sent;
...
@@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
         return sent - completed;
 }

-static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
 {
-        if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
-                                       atomic64_read(&txq->complete_txreqs)) >=
-                     min_t(unsigned int, txq->priv->netdev->tx_queue_len,
-                           txq->tx_ring.max_items - 1)))
+        return hfi1_ipoib_txreqs(txq->sent_txreqs,
+                                 atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+        if (atomic_inc_return(&txq->stops) == 1)
                 netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
 }

+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+        if (atomic_dec_and_test(&txq->stops))
+                netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+        return min_t(uint, txq->priv->netdev->tx_queue_len,
+                     txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+        return min_t(uint, txq->priv->netdev->tx_queue_len,
+                     txq->tx_ring.max_items) >> 1;
+}
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+        ++txq->sent_txreqs;
+        if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+            !atomic_xchg(&txq->ring_full, 1))
+                hfi1_ipoib_stop_txq(txq);
+}
+
 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
 {
         struct net_device *dev = txq->priv->netdev;

-        /* If the queue is already running just return */
-        if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
-                return;
-
         /* If shutting down just return as queue state is irrelevant */
         if (unlikely(dev->reg_state != NETREG_REGISTERED))
                 return;
@@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
          * Use the minimum of the current tx_queue_len or the rings max txreqs
          * to protect against ring overflow.
          */
-        if (hfi1_ipoib_txreqs(txq->sent_txreqs,
-                              atomic64_read(&txq->complete_txreqs))
-            < min_t(unsigned int, dev->tx_queue_len,
-                    txq->tx_ring.max_items) >> 1)
-                netif_wake_subqueue(dev, txq->q_idx);
+        if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+            atomic_xchg(&txq->ring_full, 0))
+                hfi1_ipoib_wake_txq(txq);
 }

 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
@@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
         if (unlikely(!tx))
                 return ERR_PTR(-ENOMEM);

-        /* so that we can test if the sdma decriptors are there */
+        /* so that we can test if the sdma descriptors are there */
         tx->txreq.num_desc = 0;
         tx->priv = priv;
         tx->txq = txp->txq;
         tx->skb = skb;
+        INIT_LIST_HEAD(&tx->txreq.list);

         hfi1_ipoib_build_ib_tx_headers(tx, txp);
@@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
         ret = hfi1_ipoib_submit_tx(txq, tx);
         if (likely(!ret)) {
+tx_ok:
                 trace_sdma_output_ibhdr(tx->priv->dd,
                                         &tx->sdma_hdr.hdr,
                                         ib_is_sc5(txp->flow.sc5));
@@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
         txq->pkts_sent = false;

-        if (ret == -EBUSY) {
-                list_add_tail(&tx->txreq.list, &txq->tx_list);
-
-                trace_sdma_output_ibhdr(tx->priv->dd,
-                                        &tx->sdma_hdr.hdr,
-                                        ib_is_sc5(txp->flow.sc5));
-
-                hfi1_ipoib_check_queue_depth(txq);
-                return NETDEV_TX_OK;
-        }
-
-        if (ret == -ECOMM) {
-                hfi1_ipoib_check_queue_depth(txq);
-                return NETDEV_TX_OK;
-        }
+        if (ret == -EBUSY || ret == -ECOMM)
+                goto tx_ok;

         sdma_txclean(priv->dd, &tx->txreq);
         dev_kfree_skb_any(skb);
@@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
         struct ipoib_txreq *tx;

         /* Has the flow change ? */
-        if (txq->flow.as_int != txp->flow.as_int)
-                (void)hfi1_ipoib_flush_tx_list(dev, txq);
+        if (txq->flow.as_int != txp->flow.as_int) {
+                int ret;
+
+                ret = hfi1_ipoib_flush_tx_list(dev, txq);
+                if (unlikely(ret)) {
+                        if (ret == -EBUSY)
+                                ++dev->stats.tx_dropped;
+                        dev_kfree_skb_any(skb);
+                        return NETDEV_TX_OK;
+                }
+        }
         tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
         if (IS_ERR(tx)) {
                 int ret = PTR_ERR(tx);
@@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
                         return -EAGAIN;
                 }

-                netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
-                if (list_empty(&txq->wait.list))
+                if (list_empty(&txreq->list))
+                        /* came from non-list submit */
+                        list_add_tail(&txreq->list, &txq->tx_list);
+                if (list_empty(&txq->wait.list)) {
+                        if (!atomic_xchg(&txq->no_desc, 1))
+                                hfi1_ipoib_stop_txq(txq);
                         iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+                }

                 write_sequnlock(&sde->waitlock);
                 return -EBUSY;
@@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
         struct net_device *dev = txq->priv->netdev;

         if (likely(dev->reg_state == NETREG_REGISTERED) &&
-            likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
             likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
-                netif_wake_subqueue(dev, txq->q_idx);
+                if (atomic_xchg(&txq->no_desc, 0))
+                        hfi1_ipoib_wake_txq(txq);
 }

 int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
@@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
                 txq->sde = NULL;
                 INIT_LIST_HEAD(&txq->tx_list);
                 atomic64_set(&txq->complete_txreqs, 0);
+                atomic_set(&txq->stops, 0);
+                atomic_set(&txq->ring_full, 0);
+                atomic_set(&txq->no_desc, 0);
                 txq->q_idx = i;
                 txq->flow.tx_queue = 0xff;
                 txq->flow.sc5 = 0xff;
@@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
                 atomic64_inc(complete_txreqs);
         }

-        if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+        if (hfi1_ipoib_used(txq))
                 dd_dev_warn(txq->priv->dd,
                             "txq %d not empty found %llu requests\n",
                             txq->q_idx,
...
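The ipoib_tx.c rework replaces a single stopped/running check with independent stop reasons (ring_full, no_desc) funneled through one stops counter, so one reason clearing cannot wake a queue the other still holds stopped. The key trick is atomic_xchg(): it returns the previous value, so only the 0 -> 1 transition stops the queue and only 1 -> 0 wakes it, regardless of how many CPUs race. A standalone sketch of that edge-triggered pattern using C11 atomics in place of the kernel's atomic_t (thresholds and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ring_full;
static atomic_int stops;

static void stop_txq(void)
{
        /* mirrors hfi1_ipoib_stop_txq(): only the 0 -> 1 transition of
         * the stop count actually stops the subqueue */
        if (atomic_fetch_add(&stops, 1) == 0)
                puts("netif_stop_subqueue()");
}

static void wake_txq(void)
{
        if (atomic_fetch_sub(&stops, 1) == 1)
                puts("netif_wake_subqueue()");
}

static void check_queue_depth(unsigned used, unsigned hwat)
{
        /* xchg returns the old value: only the first CPU to see the ring
         * cross the high-water mark issues the (single) stop */
        if (used >= hwat && !atomic_exchange(&ring_full, 1))
                stop_txq();
}

static void check_queue_stopped(unsigned used, unsigned lwat)
{
        if (used < lwat && atomic_exchange(&ring_full, 0))
                wake_txq();
}

int main(void)
{
        check_queue_depth(10, 8);   /* crosses high water: stops once */
        check_queue_depth(11, 8);   /* already stopped: no duplicate stop */
        check_queue_stopped(3, 4);  /* below low water: wakes once */
        check_queue_stopped(2, 4);  /* already awake: no duplicate wake */
        return 0;
}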
@@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
 {
         if (dd->dummy_netdev) {
                 dd_dev_info(dd, "hfi1 netdev freed\n");
-                free_netdev(dd->dummy_netdev);
+                kfree(dd->dummy_netdev);
                 dd->dummy_netdev = NULL;
         }
 }
...
@@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
         tx->mr = NULL;
         tx->sde = priv->s_sde;
         tx->psc = priv->s_sendcontext;
-        /* so that we can test if the sdma decriptors are there */
+        /* so that we can test if the sdma descriptors are there */
         tx->txreq.num_desc = 0;
         /* Set the header type */
         tx->phdr.hdr.hdr_type = priv->hdr_type;
...
@@ -898,13 +898,14 @@ struct hns_roce_hw {
         int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
         void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
                         enum ib_mtu mtu);
-        int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
-                          unsigned long mtpt_idx);
+        int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                          struct hns_roce_mr *mr, unsigned long mtpt_idx);
         int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
                                 struct hns_roce_mr *mr, int flags, u32 pdn,
                                 int mr_access_flags, u64 iova, u64 size,
                                 void *mb_buf);
-        int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+        int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                               struct hns_roce_mr *mr);
         int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
         void (*write_cqc)(struct hns_roce_dev *hr_dev,
                           struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
...
@@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
                    val);
 }

-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+                                  struct hns_roce_mr *mr,
                                   unsigned long mtpt_idx)
 {
-        struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
         u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
         struct ib_device *ibdev = &hr_dev->ib_dev;
         struct hns_roce_v1_mpt_entry *mpt_entry;
...
@@ -910,7 +910,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
         instance_stage = handle->rinfo.instance_state;
         reset_stage = handle->rinfo.reset_state;
         reset_cnt = ops->ae_dev_reset_cnt(handle);
-        hw_resetting = ops->get_hw_reset_stat(handle);
+        hw_resetting = ops->get_cmdq_stat(handle);
         sw_resetting = ops->ae_dev_resetting(handle);

         if (reset_cnt != hr_dev->reset_cnt)
@@ -2529,10 +2529,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
         return hns_roce_cmq_send(hr_dev, &desc, 1);
 }

-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+                        struct hns_roce_v2_mpt_entry *mpt_entry,
                         struct hns_roce_mr *mr)
 {
-        struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
         u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
         struct ib_device *ibdev = &hr_dev->ib_dev;
         dma_addr_t pbl_ba;
@@ -2571,7 +2571,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
         return 0;
 }

-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+                                  void *mb_buf, struct hns_roce_mr *mr,
                                   unsigned long mtpt_idx)
 {
         struct hns_roce_v2_mpt_entry *mpt_entry;
@@ -2620,7 +2621,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
         if (mr->type == MR_TYPE_DMA)
                 return 0;

-        ret = set_mtpt_pbl(mpt_entry, mr);
+        ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);

         return ret;
 }
@@ -2666,15 +2667,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
                 mr->iova = iova;
                 mr->size = size;

-                ret = set_mtpt_pbl(mpt_entry, mr);
+                ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
         }

         return ret;
 }

-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+                                       void *mb_buf, struct hns_roce_mr *mr)
 {
-        struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
         struct ib_device *ibdev = &hr_dev->ib_dev;
         struct hns_roce_v2_mpt_entry *mpt_entry;
         dma_addr_t pbl_ba = 0;
...
@@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
         }

         if (mr->type != MR_TYPE_FRMR)
-                ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+                ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+                                             mtpt_idx);
         else
-                ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+                ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
         if (ret) {
                 dev_err(dev, "Write mtpt fail!\n");
                 goto err_page;
...
@@ -1862,7 +1862,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         if (!in)
                 return -ENOMEM;

-        if (MLX5_CAP_GEN(mdev, ece_support))
+        if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
                 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
         qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
@@ -2341,18 +2341,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         unsigned long flags;
         int err;

-        if (qp->ibqp.rwq_ind_tbl) {
+        if (qp->is_rss) {
                 destroy_rss_raw_qp_tir(dev, qp);
                 return;
         }

-        base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+        base = (qp->type == IB_QPT_RAW_PACKET ||
                 qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
                &qp->raw_packet_qp.rq.base :
                &qp->trans_qp.base;

         if (qp->state != IB_QPS_RESET) {
-                if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+                if (qp->type != IB_QPT_RAW_PACKET &&
                     !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
                         err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
                                                   NULL, &base->mqp, NULL);
@@ -2368,8 +2368,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                      base->mqp.qpn);
         }

-        get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
-                &send_cq, &recv_cq);
+        get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+                &recv_cq);

         spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
         mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2391,7 +2391,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         mlx5_ib_unlock_cqs(send_cq, recv_cq);
         spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

-        if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+        if (qp->type == IB_QPT_RAW_PACKET ||
             qp->flags & IB_QP_CREATE_SOURCE_QPN) {
                 destroy_raw_packet_qp(dev, qp);
         } else {
@@ -2668,6 +2668,9 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
         if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
                 return (create_flags) ? -EINVAL : 0;

+        process_create_flag(dev, &create_flags,
+                            IB_QP_CREATE_INTEGRITY_EN,
+                            MLX5_CAP_GEN(mdev, sho), qp);
         process_create_flag(dev, &create_flags,
                             IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
                             MLX5_CAP_GEN(mdev, block_lb_mc), qp);
@@ -2873,7 +2876,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 static int check_ucmd_data(struct mlx5_ib_dev *dev,
                            struct mlx5_create_qp_params *params)
 {
-        struct ib_qp_init_attr *attr = params->attr;
         struct ib_udata *udata = params->udata;
         size_t size, last;
         int ret;
@@ -2885,14 +2887,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
                  */
                 last = sizeof(struct mlx5_ib_create_qp_rss);
         else
-                /* IB_QPT_RAW_PACKET doesn't have ECE data */
-                switch (attr->qp_type) {
-                case IB_QPT_RAW_PACKET:
-                        last = offsetof(struct mlx5_ib_create_qp, ece_options);
-                        break;
-                default:
-                        last = offsetof(struct mlx5_ib_create_qp, reserved);
-                }
+                last = offsetof(struct mlx5_ib_create_qp, reserved);

         if (udata->inlen <= last)
                 return 0;
@@ -2907,7 +2902,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
         if (!ret)
                 mlx5_ib_dbg(
                         dev,
-                        "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+                        "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
                         udata->inlen, params->ucmd_size, last, size);
         return ret ? 0 : -EINVAL;
 }
@@ -3002,10 +2997,18 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
         return &qp->ibqp;

 destroy_qp:
-        if (qp->type == MLX5_IB_QPT_DCT)
+        if (qp->type == MLX5_IB_QPT_DCT) {
                 mlx5_ib_destroy_dct(qp);
-        else
+        } else {
+                /*
+                 * The two lines below are temp solution till QP allocation
+                 * will be moved to be under IB/core responsiblity.
+                 */
+                qp->ibqp.send_cq = attr->send_cq;
+                qp->ibqp.recv_cq = attr->recv_cq;
                 destroy_qp_common(dev, qp, udata);
+        }
+
         qp = NULL;
 free_qp:
         kfree(qp);
@@ -4162,8 +4165,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         if (udata->outlen < min_resp_len)
                 return -EINVAL;

-        resp.response_length = min_resp_len;
-
         /*
          * If we don't have enough space for the ECE options,
          * simply indicate it with resp.response_length.
@@ -4384,8 +4385,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
                                 MLX5_GET(ads, path, src_addr_index),
                                 MLX5_GET(ads, path, hop_limit),
                                 MLX5_GET(ads, path, tclass));
-                memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
-                       MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+                rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
         }
 }
...
@@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
         int ece = 0;

         switch (opcode) {
+        case MLX5_CMD_OP_INIT2INIT_QP:
+                ece = MLX5_GET(init2init_qp_out, out, ece);
+                break;
         case MLX5_CMD_OP_INIT2RTR_QP:
                 ece = MLX5_GET(init2rtr_qp_out, out, ece);
                 break;
@@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
         case MLX5_CMD_OP_RTS2RTS_QP:
                 ece = MLX5_GET(rts2rts_qp_out, out, ece);
                 break;
+        case MLX5_CMD_OP_RST2INIT_QP:
+                ece = MLX5_GET(rst2init_qp_out, out, ece);
+                break;
         default:
                 break;
         }
@@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                         return -ENOMEM;
                 MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                   opt_param_mask, qpc, uid);
+                MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
                 break;
         case MLX5_CMD_OP_INIT2RTR_QP:
                 if (MBOX_ALLOC(mbox, init2rtr_qp))
@@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                         return -ENOMEM;
                 MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                   opt_param_mask, qpc, uid);
+                MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
                 break;
         default:
                 return -EINVAL;
...
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
         if (params->cm_info) {
                 event.ird = params->cm_info->ird;
                 event.ord = params->cm_info->ord;
-                event.private_data_len = params->cm_info->private_data_len;
-                event.private_data = (void *)params->cm_info->private_data;
+                /* Only connect_request and reply have valid private data
+                 * the rest of the events this may be left overs from
+                 * connection establishment. CONNECT_REQUEST is issued via
+                 * qedr_iw_mpa_request
+                 */
+                if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+                        event.private_data_len =
+                                params->cm_info->private_data_len;
+                        event.private_data =
+                                (void *)params->cm_info->private_data;
+                }
         }

         if (ep->cm_id)
...
@@ -1204,7 +1204,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                 err = alloc_ud_wq_attr(qp, rdi->dparms.node);
                 if (err) {
                         ret = (ERR_PTR(err));
-                        goto bail_driver_priv;
+                        goto bail_rq_rvt;
                 }

                 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
@@ -1314,9 +1314,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
         rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

 bail_rq_wq:
-        rvt_free_rq(&qp->r_rq);
         free_ud_wq_attr(qp);

+bail_rq_rvt:
+        rvt_free_rq(&qp->r_rq);
+
 bail_driver_priv:
         rdi->driver_f.qp_priv_free(rdi, qp);
...
@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
                         break;

                 bytes = min(bytes, len);
-                if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+                if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+                    bytes) {
                         copied += bytes;
                         offset += bytes;
                         len -= bytes;
...
@@ -4283,7 +4283,8 @@ struct mlx5_ifc_rst2init_qp_out_bits {

         u8         syndrome[0x20];

-        u8         reserved_at_40[0x40];
+        u8         reserved_at_40[0x20];
+        u8         ece[0x20];
 };

 struct mlx5_ifc_rst2init_qp_in_bits {
@@ -4300,7 +4301,7 @@ struct mlx5_ifc_rst2init_qp_in_bits {

         u8         opt_param_mask[0x20];

-        u8         reserved_at_a0[0x20];
+        u8         ece[0x20];

         struct mlx5_ifc_qpc_bits qpc;
@@ -6619,7 +6620,8 @@ struct mlx5_ifc_init2init_qp_out_bits {

         u8         syndrome[0x20];

-        u8         reserved_at_40[0x40];
+        u8         reserved_at_40[0x20];
+        u8         ece[0x20];
 };

 struct mlx5_ifc_init2init_qp_in_bits {
@@ -6636,7 +6638,7 @@ struct mlx5_ifc_init2init_qp_in_bits {

         u8         opt_param_mask[0x20];

-        u8         reserved_at_a0[0x20];
+        u8         ece[0x20];

         struct mlx5_ifc_qpc_bits qpc;
...
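The mlx5_ifc.h hunks carve a 32-bit ece field out of previously reserved space in the RST2INIT and INIT2INIT mailbox layouts (the [0x20] annotations are field widths in bits), which is what lets the MLX5_GET()/MLX5_SET() calls in qpc.c above read and write ECE options during those transitions. A rough userspace illustration of what such accessors boil down to, reading and writing a 32-bit field at a fixed offset of a big-endian command buffer; the offset below is invented for the example, not the real layout:

#include <arpa/inet.h>   /* htonl/ntohl */
#include <stdint.h>
#include <stdio.h>

#define ECE_DWORD_OFF 2  /* pretend "ece" is the third dword */

static void set_ece(uint32_t *mbox, uint32_t val)
{
        mbox[ECE_DWORD_OFF] = htonl(val);   /* device expects big endian */
}

static uint32_t get_ece(const uint32_t *mbox)
{
        return ntohl(mbox[ECE_DWORD_OFF]);
}

int main(void)
{
        uint32_t mbox[8] = { 0 };

        set_ece(mbox, 0x1234);
        printf("ece = 0x%x\n", get_ece(mbox));
        return 0;
}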