Commit 374c5285 authored by Devesh Sharma, committed by Jason Gunthorpe

RDMA/bnxt_re: Enable GSI QP support for 57500 series

In the new 57500 series of adapters the GSI QP is a UD-type QP, unlike the
previous generation where it was a raw Ethernet QP. Change the control and
data paths to support this. The significant changes are:

 - AH creation resolves the network type unconditionally.
 - Add checks at the relevant places to distinguish from the raw
   Ethernet processing flow.
 - bnxt_re_process_res_ud_wc reports the completion with the GRH flag
   set when the QP is GSI.
 - Change length, cfa_meta and smac to match the new driver/hardware
   interface.
 - Add the new driver/hardware interface definitions.
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent e0387e1d
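The hunks below gate the new behavior on bnxt_qplib_is_chip_gen_p5(), which is defined elsewhere in the driver and is not part of this diff. A minimal kernel-style sketch of such a predicate follows; the CHIP_NUM_* values are assumptions inferred from the 57508/57504/57502 part numbers, not copied from this commit:

	/* Sketch only: the real helper and chip IDs live in the bnxt_re/bnxt_en
	 * headers; the CHIP_NUM_* values here are assumptions. Kernel types
	 * (bool, u16) assumed. */
	#define CHIP_NUM_57508 0x1750
	#define CHIP_NUM_57504 0x1751
	#define CHIP_NUM_57502 0x1752

	struct bnxt_qplib_chip_ctx {
		u16 chip_num;
	};

	static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
	{
		return cctx->chip_num == CHIP_NUM_57508 ||
		       cctx->chip_num == CHIP_NUM_57504 ||
		       cctx->chip_num == CHIP_NUM_57502;
	}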
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -663,17 +663,36 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
 	return 0;
 }
 
+static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
+{
+	u8 nw_type;
+
+	switch (ntype) {
+	case RDMA_NETWORK_IPV4:
+		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
+		break;
+	case RDMA_NETWORK_IPV6:
+		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
+		break;
+	default:
+		nw_type = CMDQ_CREATE_AH_TYPE_V1;
+		break;
+	}
+	return nw_type;
+}
+
 struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
 				struct rdma_ah_attr *ah_attr,
 				u32 flags,
 				struct ib_udata *udata)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
 	struct bnxt_re_dev *rdev = pd->rdev;
+	const struct ib_gid_attr *sgid_attr;
 	struct bnxt_re_ah *ah;
-	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
-	int rc;
 	u8 nw_type;
+	int rc;
 
 	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
 		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
@@ -700,28 +719,11 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
 	ah->qplib_ah.flow_label = grh->flow_label;
 	ah->qplib_ah.hop_limit = grh->hop_limit;
 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
-	if (udata &&
-	    !rdma_is_multicast_addr((struct in6_addr *)
-				    grh->dgid.raw) &&
-	    !rdma_link_local_addr((struct in6_addr *)
-				  grh->dgid.raw)) {
-		const struct ib_gid_attr *sgid_attr;
-
-		sgid_attr = grh->sgid_attr;
-		/* Get network header type for this GID */
-		nw_type = rdma_gid_attr_network_type(sgid_attr);
-		switch (nw_type) {
-		case RDMA_NETWORK_IPV4:
-			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
-			break;
-		case RDMA_NETWORK_IPV6:
-			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
-			break;
-		default:
-			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
-			break;
-		}
-	}
+	sgid_attr = grh->sgid_attr;
+	/* Get network header type for this GID */
+	nw_type = rdma_gid_attr_network_type(sgid_attr);
+	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
 
 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
 	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
@@ -1065,12 +1067,17 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 	qp->qplib_qp.pd = &pd->qplib_pd;
 	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
 	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
+	if (qp_init_attr->qp_type == IB_QPT_GSI &&
+	    bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+		qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
+
 	if (qp->qplib_qp.type == IB_QPT_MAX) {
 		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
 			qp->qplib_qp.type);
 		rc = -EINVAL;
 		goto fail;
 	}
+
 	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
 	qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
 				  IB_SIGNAL_ALL_WR) ? true : false);
@@ -1131,7 +1138,8 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
 
-	if (qp_init_attr->qp_type == IB_QPT_GSI) {
+	if (qp_init_attr->qp_type == IB_QPT_GSI &&
+	    !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
 		/* Allocate 1 more than what's provided */
 		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
 		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
@@ -2091,7 +2099,8 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
 
 static int is_ud_qp(struct bnxt_re_qp *qp)
 {
-	return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
+	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
+		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
 }
 
 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
@@ -2395,7 +2404,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
 		switch (wr->opcode) {
 		case IB_WR_SEND:
 		case IB_WR_SEND_WITH_IMM:
-			if (ib_qp->qp_type == IB_QPT_GSI) {
+			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
 				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
 							       payload_sz);
 				if (rc)
@@ -2525,7 +2534,8 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
 		wqe.wr_id = wr->wr_id;
 		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
 
-		if (ib_qp->qp_type == IB_QPT_GSI)
+		if (ib_qp->qp_type == IB_QPT_GSI &&
+		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
 			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
 							      payload_sz);
 		if (!rc)
@@ -3120,19 +3130,33 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
 	}
 }
 
-static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
+static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
+				      struct ib_wc *wc,
 				      struct bnxt_qplib_cqe *cqe)
 {
+	u8 nw_type;
+
 	wc->opcode = IB_WC_RECV;
 	wc->status = __rc_to_ib_wc_status(cqe->status);
 
-	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
+	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
 		wc->wc_flags |= IB_WC_WITH_IMM;
-	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
-		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
-	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
-	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
-		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+	/* report only on GSI QP for Thor */
+	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
+		wc->wc_flags |= IB_WC_GRH;
+		memcpy(wc->smac, cqe->smac, ETH_ALEN);
+		wc->wc_flags |= IB_WC_WITH_SMAC;
+		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
+			wc->vlan_id = (cqe->cfa_meta & 0xFFF);
+			if (wc->vlan_id < 0x1000)
+				wc->wc_flags |= IB_WC_WITH_VLAN;
+		}
+		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
+			  CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
+		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
+		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+	}
 }
 
 static int send_phantom_wqe(struct bnxt_re_qp *qp)
@@ -3224,7 +3248,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 		switch (cqe->opcode) {
 		case CQ_BASE_CQE_TYPE_REQ:
-			if (qp->qplib_qp.id ==
+			if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
 			    qp->rdev->qp1_sqp->qplib_qp.id) {
 				/* Handle this completion with
 				 * the stored completion
@@ -3259,7 +3283,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 			bnxt_re_process_res_rc_wc(wc, cqe);
 			break;
 		case CQ_BASE_CQE_TYPE_RES_UD:
-			if (qp->qplib_qp.id ==
+			if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
 			    qp->rdev->qp1_sqp->qplib_qp.id) {
 				/* Handle this completion with
 				 * the stored completion
@@ -3272,7 +3296,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 					break;
 				}
 			}
-			bnxt_re_process_res_ud_wc(wc, cqe);
+			bnxt_re_process_res_ud_wc(qp, wc, cqe);
 			break;
 		default:
 			dev_err(rdev_to_dev(cq->rdev),
......
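The UD completion path above also calls bnxt_re_to_ib_nw_type(), a pre-existing helper that is not part of this diff. A plausible sketch, assuming it simply inverts the CQE's 2-bit RoCE IP version encoding (0 = RoCE v1, 2 = RoCE v2 IPv4, 3 = RoCE v2 IPv6, matching the cq_res_ud flag values further below) back to the stack's enum rdma_network_type:

	/* Sketch under the stated assumption; not copied from the driver. */
	static enum rdma_network_type bnxt_re_to_ib_nw_type(u8 nw_type)
	{
		switch (nw_type) {
		case 2:
			return RDMA_NETWORK_IPV4;
		case 3:
			return RDMA_NETWORK_IPV6;
		default:
			return RDMA_NETWORK_ROCE_V1;
		}
	}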
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -44,6 +44,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/prefetch.h>
+#include <linux/if_ether.h>
 
 #include "roce_hsi.h"
@@ -1622,7 +1623,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 			((offsetof(typeof(*sqe), data) + 15) >> 4);
 		sqe->inv_key_or_imm_data = cpu_to_le32(
 						wqe->send.inv_key);
-		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
+		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
+		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
 			sqe->q_key = cpu_to_le32(wqe->send.q_key);
 			sqe->dst_qp = cpu_to_le32(
 					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
@@ -2408,12 +2410,14 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
 	}
 	cqe = *pcqe;
 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
-	cqe->length = le32_to_cpu(hwcqe->length);
+	cqe->length = (u32)le16_to_cpu(hwcqe->length);
+	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
 	cqe->flags = le16_to_cpu(hwcqe->flags);
 	cqe->status = hwcqe->status;
 	cqe->qp_handle = (u64)(unsigned long)qp;
-	memcpy(cqe->smac, hwcqe->src_mac, 6);
+	/* FIXME: Endianness fix needed for smac */
+	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
 	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
 				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
......
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -347,6 +347,7 @@ struct bnxt_qplib_cqe {
 	u8 type;
 	u8 opcode;
 	u32 length;
+	u16 cfa_meta;
 	u64 wr_id;
 	union {
 		__be32 immdata;
......
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -515,22 +515,24 @@ struct cq_res_rc {
 /* Responder UD CQE (32 bytes) */
 struct cq_res_ud {
-	__le32 length;
+	__le16 length;
 	#define CQ_RES_UD_LENGTH_MASK			0x3fffUL
 	#define CQ_RES_UD_LENGTH_SFT			0
-	#define CQ_RES_UD_RESERVED18_MASK		0xffffc000UL
-	#define CQ_RES_UD_RESERVED18_SFT		14
+	__le16 cfa_metadata;
+	#define CQ_RES_UD_CFA_METADATA_VID_MASK		0xfffUL
+	#define CQ_RES_UD_CFA_METADATA_VID_SFT		0
+	#define CQ_RES_UD_CFA_METADATA_DE		0x1000UL
+	#define CQ_RES_UD_CFA_METADATA_PRI_MASK		0xe000UL
+	#define CQ_RES_UD_CFA_METADATA_PRI_SFT		13
 	__le32 imm_data;
 	__le64 qp_handle;
 	__le16 src_mac[3];
 	__le16 src_qp_low;
 	u8 cqe_type_toggle;
-	#define CQ_RES_UD_TOGGLE			0x1UL
-	#define CQ_RES_UD_CQE_TYPE_MASK			0x1eUL
-	#define CQ_RES_UD_CQE_TYPE_SFT			1
+	#define CQ_RES_UD_TOGGLE			0x1UL
+	#define CQ_RES_UD_CQE_TYPE_MASK			0x1eUL
+	#define CQ_RES_UD_CQE_TYPE_SFT			1
 	#define CQ_RES_UD_CQE_TYPE_RES_UD		(0x2UL << 1)
 	#define CQ_RES_UD_RESERVED3_MASK		0xe0UL
 	#define CQ_RES_UD_RESERVED3_SFT			5
 	u8 status;
 	#define CQ_RES_UD_STATUS_OK			0x0UL
 	#define CQ_RES_UD_STATUS_LOCAL_ACCESS_ERROR	0x1UL
@@ -546,18 +548,30 @@ struct cq_res_ud {
 	#define CQ_RES_UD_FLAGS_SRQ_SRQ		(0x1UL << 0)
 	#define CQ_RES_UD_FLAGS_SRQ_LAST	CQ_RES_UD_FLAGS_SRQ_SRQ
 	#define CQ_RES_UD_FLAGS_IMM			0x2UL
-	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK	0xcUL
-	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT		2
-	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1		(0x0UL << 2)
-	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4	(0x2UL << 2)
-	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6	(0x3UL << 2)
+	#define CQ_RES_UD_FLAGS_UNUSED_MASK		0xcUL
+	#define CQ_RES_UD_FLAGS_UNUSED_SFT		2
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK	0x30UL
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT		4
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V1		(0x0UL << 4)
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV4	(0x2UL << 4)
+	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6	(0x3UL << 4)
 	#define CQ_RES_UD_FLAGS_ROCE_IP_VER_LAST	\
 				CQ_RES_UD_FLAGS_ROCE_IP_VER_V2IPV6
+	#define CQ_RES_UD_FLAGS_META_FORMAT_MASK	0x3c0UL
+	#define CQ_RES_UD_FLAGS_META_FORMAT_SFT		6
+	#define CQ_RES_UD_FLAGS_META_FORMAT_NONE	(0x0UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_VLAN	(0x1UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_TUNNEL_ID	(0x2UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_CHDR_DATA	(0x3UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET	(0x4UL << 6)
+	#define CQ_RES_UD_FLAGS_META_FORMAT_LAST	\
+				CQ_RES_UD_FLAGS_META_FORMAT_HDR_OFFSET
+	#define CQ_RES_UD_FLAGS_EXT_META_FORMAT_MASK	0xc00UL
+	#define CQ_RES_UD_FLAGS_EXT_META_FORMAT_SFT	10
 
 	__le32 src_qp_high_srq_or_rq_wr_id;
 	#define CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK		0xfffffUL
 	#define CQ_RES_UD_SRQ_OR_RQ_WR_ID_SFT		0
 	#define CQ_RES_UD_RESERVED4_MASK		0xf00000UL
 	#define CQ_RES_UD_RESERVED4_SFT			20
 	#define CQ_RES_UD_SRC_QP_HIGH_MASK		0xff000000UL
 	#define CQ_RES_UD_SRC_QP_HIGH_SFT		24
 };
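Taken together, the relocated RoCE IP version field (now bits 5:4 of flags) and the new cfa_metadata word drive the work-completion fields set in bnxt_re_process_res_ud_wc() above. A small standalone C sketch of that decode, with masks mirroring the defines above and sample values chosen purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Standalone sketch; masks mirror the cq_res_ud defines above. */
	#define ROCE_IP_VER_MASK  0x30u   /* CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK */
	#define ROCE_IP_VER_SFT   4
	#define META_FORMAT_MASK  0x3c0u  /* CQ_RES_UD_FLAGS_META_FORMAT_MASK */
	#define META_FORMAT_VLAN  (0x1u << 6)
	#define CFA_META_VID_MASK 0xfffu  /* CQ_RES_UD_CFA_METADATA_VID_MASK */

	int main(void)
	{
		uint16_t flags = (0x2u << 4) | (0x1u << 6); /* v2 IPv4 + VLAN meta */
		uint16_t cfa_meta = 0x1064;                 /* DE bit set, VLAN id 100 */

		uint8_t ip_ver = (flags & ROCE_IP_VER_MASK) >> ROCE_IP_VER_SFT;
		printf("roce ip ver: %u\n", (unsigned)ip_ver);   /* prints 2 (v2 IPv4) */

		if ((flags & META_FORMAT_MASK) == META_FORMAT_VLAN)
			printf("vlan id: %u\n",
			       (unsigned)(cfa_meta & CFA_META_VID_MASK)); /* prints 100 */
		return 0;
	}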
@@ -993,6 +1007,7 @@ struct cmdq_create_qp {
 	#define CMDQ_CREATE_QP_TYPE_RC			0x2UL
 	#define CMDQ_CREATE_QP_TYPE_UD			0x4UL
 	#define CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE	0x6UL
+	#define CMDQ_CREATE_QP_TYPE_GSI			0x7UL
 	u8 sq_pg_size_sq_lvl;
 	#define CMDQ_CREATE_QP_SQ_LVL_MASK		0xfUL
 	#define CMDQ_CREATE_QP_SQ_LVL_SFT		0
......