Commit b0dfa64d authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Several recent regressions and some bug fixes:

   - Typo corrupting the max_recv_sge for cxgb4

   - Regression from re-using kernel enums as a HW ABI in vmw_pvrdma

   - Sleeping inside a spinlock in hns

   - Revert the attempt to fix devlink deadlocks, as the fix is buggier
     than the deadlock it addressed

   - Typo in sysfs_emit_at conversions

   - Revert the removal of VLAN support in rxe"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  Revert "RDMA/rxe: Remove VLAN code leftovers from RXE"
  RDMA/usnic: Fix misuse of sysfs_emit_at
  Revert "RDMA/mlx5: Fix devlink deadlock on net namespace deletion"
  RDMA/hns: Use mutex instead of spinlock for ida allocation
  RDMA/vmw_pvrdma: Fix network_hdr_type reported in WC
  RDMA/cxgb4: Fix the reported max_recv_sge value
parents 25221c99 f1b0a8ea

@@ -2474,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
         init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
         init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
-        init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+        init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
         init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
         init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
         return 0;
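
The cxgb4 fix corrects a copy-paste typo: c4iw_ib_query_qp() filled max_recv_sge from sq_max_sges, so anyone querying a QP saw the send-queue SGE limit where the receive-queue limit belonged. A minimal userspace sketch of such a query, assuming libibverbs and a hypothetical already-created qp:

#include <stdio.h>
#include <infiniband/verbs.h>

/* Print the recv-SGE limit the driver reports for a QP; before the
 * fix, cxgb4 returned the send-side limit here. */
static void print_recv_sge_limit(struct ibv_qp *qp)
{
        struct ibv_qp_attr attr;
        struct ibv_qp_init_attr init_attr;

        if (!ibv_query_qp(qp, &attr, IBV_QP_CAP, &init_attr))
                printf("max_recv_sge = %u\n", init_attr.cap.max_recv_sge);
}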

@@ -532,7 +532,7 @@ struct hns_roce_qp_table {
         struct hns_roce_hem_table sccc_table;
         struct mutex scc_mutex;
         struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
-        spinlock_t bank_lock;
+        struct mutex bank_mutex;
 };

 struct hns_roce_cq_table {

@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
                 hr_qp->doorbell_qpn = 1;
         } else {
-                spin_lock(&qp_table->bank_lock);
+                mutex_lock(&qp_table->bank_mutex);
                 bankid = get_least_load_bankid_for_qp(qp_table->bank);

                 ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
                 if (ret) {
                         ibdev_err(&hr_dev->ib_dev,
                                   "failed to alloc QPN, ret = %d\n", ret);
-                        spin_unlock(&qp_table->bank_lock);
+                        mutex_unlock(&qp_table->bank_mutex);
                         return ret;
                 }

                 qp_table->bank[bankid].inuse++;
-                spin_unlock(&qp_table->bank_lock);
+                mutex_unlock(&qp_table->bank_mutex);

                 hr_qp->doorbell_qpn = (u32)num;
         }
@@ -408,9 +408,9 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
         ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);

-        spin_lock(&hr_dev->qp_table.bank_lock);
+        mutex_lock(&hr_dev->qp_table.bank_mutex);
         hr_dev->qp_table.bank[bankid].inuse--;
-        spin_unlock(&hr_dev->qp_table.bank_lock);
+        mutex_unlock(&hr_dev->qp_table.bank_mutex);
 }

 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
@@ -1371,6 +1371,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
         unsigned int i;

         mutex_init(&qp_table->scc_mutex);
+        mutex_init(&qp_table->bank_mutex);
         xa_init(&hr_dev->qp_table_xa);

         reserved_from_bot = hr_dev->caps.reserved_qps;
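
The hns change replaces the QPN bank spinlock with a mutex because alloc_qpn_with_bankid() ends up in ida_alloc_range() with GFP_KERNEL, an allocation that may sleep, and sleeping while holding a spinlock is illegal. A minimal sketch of the corrected pattern, assuming kernel-module context (the names and ID range are hypothetical):

#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDA(bank_ida);
static DEFINE_MUTEX(bank_mutex);

/* A mutex may be held across a sleeping GFP_KERNEL allocation;
 * a spinlock may not. */
static int alloc_bank_id(void)
{
        int id;

        mutex_lock(&bank_mutex);
        id = ida_alloc_range(&bank_ida, 0, 1023, GFP_KERNEL);
        mutex_unlock(&bank_mutex);

        return id;
}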

@@ -3311,8 +3311,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
         int err;

         dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
-        err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
-                                              &dev->port[port_num].roce.nb);
+        err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
         if (err) {
                 dev->port[port_num].roce.nb.notifier_call = NULL;
                 return err;
@@ -3324,8 +3323,7 @@ static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 {
         if (dev->port[port_num].roce.nb.notifier_call) {
-                unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
-                                                  &dev->port[port_num].roce.nb);
+                unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
                 dev->port[port_num].roce.nb.notifier_call = NULL;
         }
 }

@@ -214,7 +214,7 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
         struct usnic_vnic_res *vnic_res;
         int len;

-        len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+        len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
                        qp_grp->ibqp.qp_num,
                        usnic_ib_qp_grp_state_to_string(qp_grp->state),
                        qp_grp->owner_pid,
@@ -224,14 +224,13 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
                 res_chunk = qp_grp->res_chunk_list[i];
                 for (j = 0; j < res_chunk->cnt; j++) {
                         vnic_res = res_chunk->res[j];
-                        len += sysfs_emit_at(
-                                buf, len, "%s[%d] ",
+                        len += sysfs_emit_at(buf, len, " %s[%d]",
                                 usnic_vnic_res_type_to_str(vnic_res->type),
                                 vnic_res->vnic_idx);
                 }
         }

-        len = sysfs_emit_at(buf, len, "\n");
+        len += sysfs_emit_at(buf, len, "\n");

         return len;
 }
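
The usnic fix addresses the sysfs_emit_at() contract: the function returns only the number of bytes it wrote at offset len, so callers must accumulate with +=; the plain assignment in the old code threw away everything emitted before the final newline. The separator space also moves from trailing to leading so the list renders without a dangling blank. A minimal sketch of the accumulation pattern, assuming kernel context (the attribute and its contents are hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        ssize_t len;

        len = sysfs_emit(buf, "header");                /* starts the buffer */
        len += sysfs_emit_at(buf, len, " item[%d]", 0); /* appends at offset */
        len += sysfs_emit_at(buf, len, "\n");
        return len;
}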

@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
         return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
 }

+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+        switch (type) {
+        case PVRDMA_NETWORK_ROCE_V1:
+                return RDMA_NETWORK_ROCE_V1;
+        case PVRDMA_NETWORK_IPV4:
+                return RDMA_NETWORK_IPV4;
+        case PVRDMA_NETWORK_IPV6:
+                return RDMA_NETWORK_IPV6;
+        default:
+                return RDMA_NETWORK_IPV6;
+        }
+}
+
 void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
                          const struct pvrdma_qp_cap *src);
 void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,

@@ -367,7 +367,7 @@ static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
         wc->dlid_path_bits = cqe->dlid_path_bits;
         wc->port_num = cqe->port_num;
         wc->vendor_err = cqe->vendor_err;
-        wc->network_hdr_type = cqe->network_hdr_type;
+        wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);

         /* Update shared ring state */
         pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
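
The vmw_pvrdma fix decouples the device ABI from kernel internals. The device reports the completion's network header type using the numeric values that the kernel's enum rdma_network_type happened to have when the driver was written; once the kernel renumbered that enum, copying cqe->network_hdr_type straight into the work completion produced wrong header types. The driver now carries its own enum pvrdma_network_type in the uapi header (see the last hunk below) and translates explicitly through pvrdma_network_type_to_ib().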

@@ -8,6 +8,7 @@
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <linux/if.h>
+#include <linux/if_vlan.h>
 #include <net/udp_tunnel.h>
 #include <net/sch_generic.h>
 #include <linux/netfilter.h>
@@ -153,9 +154,14 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
         struct udphdr *udph;
         struct net_device *ndev = skb->dev;
+        struct net_device *rdev = ndev;
         struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
         struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

+        if (!rxe && is_vlan_dev(rdev)) {
+                rdev = vlan_dev_real_dev(ndev);
+                rxe = rxe_get_dev_from_net(rdev);
+        }
+
         if (!rxe)
                 goto drop;
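
An rxe instance is registered against the real Ethernet device, so a packet arriving on a VLAN upper device fails the first rxe_get_dev_from_net() lookup; the restored code retries against the device underneath the VLAN. A minimal sketch of that resolution step, assuming kernel context (the helper name is hypothetical):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Map a VLAN upper device (e.g. eth0.100) back to the real device
 * (eth0) that an rxe instance would be bound to. */
static struct net_device *resolve_real_dev(struct net_device *ndev)
{
        return is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
}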

@@ -872,6 +872,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
                         else
                                 wc->network_hdr_type = RDMA_NETWORK_IPV6;

+                        if (is_vlan_dev(skb->dev)) {
+                                wc->wc_flags |= IB_WC_WITH_VLAN;
+                                wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+                        }
+
                         if (pkt->mask & RXE_IMMDT_MASK) {
                                 wc->wc_flags |= IB_WC_WITH_IMM;
                                 wc->ex.imm_data = immdt_imm(pkt);
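
With VLAN handling restored, the responder also surfaces the tag to consumers: completions for requests that arrived on a VLAN device set IB_WC_WITH_VLAN in wc_flags and carry the VLAN ID from vlan_dev_vlan_id(), so ULPs polling the CQ can recover which VLAN the packet came in on.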

@@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
                                u32 key_type, u32 *p_key_id);
 void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);

+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+        return devlink_net(priv_to_devlink(dev));
+}
+
 #endif
@@ -1209,22 +1209,4 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
         return val.vbool;
 }

-/**
- * mlx5_core_net - Provide net namespace of the mlx5_core_dev
- * @dev: mlx5 core device
- *
- * mlx5_core_net() returns the net namespace of mlx5 core device.
- * This can be called only in below described limited context.
- * (a) When a devlink instance for mlx5_core is registered and
- *     when devlink reload operation is disabled.
- *     or
- * (b) during devlink reload reload_down() and reload_up callbacks
- *     where it is ensured that devlink instance's net namespace is
- *     stable.
- */
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
-        return devlink_net(priv_to_devlink(dev));
-}
-
 #endif /* MLX5_DRIVER_H */
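
The revert returns mlx5_ib to a global netdev notifier. The per-namespace variant depended on mlx5_core_net(), whose devlink-derived namespace is only stable in the narrow contexts spelled out in the comment deleted above, and the combination could deadlock on net namespace deletion; mlx5_core_net() itself survives, relocated to the mlx5-internal lib/mlx5.h. A minimal sketch of the two registration scopes, assuming kernel-module context (names are hypothetical):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_netdev_event(struct notifier_block *nb,
                             unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

        if (event == NETDEV_UP)
                pr_info("%s is up\n", ndev->name);
        return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_netdev_event,
};

/* register_netdevice_notifier(&demo_nb) delivers events from every
 * net namespace; register_netdevice_notifier_net(net, &demo_nb)
 * restricts delivery to the single namespace 'net'. */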

@@ -133,6 +133,13 @@ enum pvrdma_wc_flags {
         PVRDMA_WC_FLAGS_MAX = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
 };

+enum pvrdma_network_type {
+        PVRDMA_NETWORK_IB,
+        PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
+        PVRDMA_NETWORK_IPV4,
+        PVRDMA_NETWORK_IPV6
+};
+
 struct pvrdma_alloc_ucontext_resp {
         __u32 qp_tab_size;
         __u32 reserved;
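
Defining PVRDMA_NETWORK_ROCE_V1 as an alias of PVRDMA_NETWORK_IB freezes the values the device already speaks, so future reshuffles of the kernel's enum rdma_network_type can no longer silently change the wire ABI; the pvrdma_network_type_to_ib() mapping above absorbs any divergence.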