Commit 2da95be9 authored by David S. Miller's avatar David S. Miller

Merge branch 'qed-Add-iWARP-support-for-QL4xxxx'

Michal Kalderon says:

====================
qed: Add iWARP support for QL4xxxx

This patch series adds iWARP support to our QL4xxxx networking adapters.
The code changes span across qed and qedr drivers, but this series contains
changes to qed only. Once the series is accepted, the qedr series will
be submitted to the rdma tree.
There is one additional qed patch which enables iWARP; that patch is
delayed until the qedr series is accepted.

The patches were previously sent as an RFC, and these are the first 12
patches in the RFC series:
https://www.spinics.net/lists/linux-rdma/msg51416.html

This series was tested and built against net-next.

MAINTAINERS file is not updated in this PATCH as there is a pending patch
for qedr driver update https://patchwork.kernel.org/patch/9752761.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents a5192c52 93c45984
...@@ -5,6 +5,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ ...@@ -5,6 +5,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
qed-$(CONFIG_QED_FCOE) += qed_fcoe.o qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
...@@ -210,14 +210,16 @@ struct qed_tunn_update_params { ...@@ -210,14 +210,16 @@ struct qed_tunn_update_params {
/* The PCI personality is not quite synonymous to protocol ID: /* The PCI personality is not quite synonymous to protocol ID:
* 1. All personalities need CORE connections * 1. All personalities need CORE connections
* 2. The Ethernet personality may support also the RoCE protocol * 2. The Ethernet personality may support also the RoCE/iWARP protocol
*/ */
enum qed_pci_personality { enum qed_pci_personality {
QED_PCI_ETH, QED_PCI_ETH,
QED_PCI_FCOE, QED_PCI_FCOE,
QED_PCI_ISCSI, QED_PCI_ISCSI,
QED_PCI_ETH_ROCE, QED_PCI_ETH_ROCE,
QED_PCI_DEFAULT /* default in shmem */ QED_PCI_ETH_IWARP,
QED_PCI_ETH_RDMA,
QED_PCI_DEFAULT, /* default in shmem */
}; };
/* All VFs are symmetric, all counters are PF + all VFs */ /* All VFs are symmetric, all counters are PF + all VFs */
...@@ -277,6 +279,7 @@ enum qed_dev_cap { ...@@ -277,6 +279,7 @@ enum qed_dev_cap {
QED_DEV_CAP_FCOE, QED_DEV_CAP_FCOE,
QED_DEV_CAP_ISCSI, QED_DEV_CAP_ISCSI,
QED_DEV_CAP_ROCE, QED_DEV_CAP_ROCE,
QED_DEV_CAP_IWARP,
}; };
enum qed_wol_support { enum qed_wol_support {
...@@ -287,6 +290,23 @@ enum qed_wol_support { ...@@ -287,6 +290,23 @@ enum qed_wol_support {
struct qed_hw_info { struct qed_hw_info {
/* PCI personality */ /* PCI personality */
enum qed_pci_personality personality; enum qed_pci_personality personality;
#define QED_IS_RDMA_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
(dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
(dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_ROCE_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
(dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_IWARP_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
(dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_L2_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ETH || \
QED_IS_RDMA_PERSONALITY(dev))
#define QED_IS_FCOE_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ISCSI)
/* Resource Allocation scheme results */ /* Resource Allocation scheme results */
u32 resc_start[QED_MAX_RESC]; u32 resc_start[QED_MAX_RESC];
...@@ -759,7 +779,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, ...@@ -759,7 +779,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
} }
#define PURE_LB_TC 8 #define PURE_LB_TC 8
#define OOO_LB_TC 9 #define PKT_LB_TC 9
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
...@@ -769,6 +789,8 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, ...@@ -769,6 +789,8 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_device_num_engines(struct qed_dev *cdev); int qed_device_num_engines(struct qed_dev *cdev);
int qed_device_get_port_id(struct qed_dev *cdev); int qed_device_get_port_id(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
......
...@@ -246,14 +246,16 @@ struct qed_cxt_mngr { ...@@ -246,14 +246,16 @@ struct qed_cxt_mngr {
static bool src_proto(enum protocol_type type) static bool src_proto(enum protocol_type type)
{ {
return type == PROTOCOLID_ISCSI || return type == PROTOCOLID_ISCSI ||
type == PROTOCOLID_FCOE; type == PROTOCOLID_FCOE ||
type == PROTOCOLID_IWARP;
} }
static bool tm_cid_proto(enum protocol_type type) static bool tm_cid_proto(enum protocol_type type)
{ {
return type == PROTOCOLID_ISCSI || return type == PROTOCOLID_ISCSI ||
type == PROTOCOLID_FCOE || type == PROTOCOLID_FCOE ||
type == PROTOCOLID_ROCE; type == PROTOCOLID_ROCE ||
type == PROTOCOLID_IWARP;
} }
static bool tm_tid_proto(enum protocol_type type) static bool tm_tid_proto(enum protocol_type type)
...@@ -853,7 +855,7 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines) ...@@ -853,7 +855,7 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
if (!excess_lines) if (!excess_lines)
return 0; return 0;
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
return 0; return 0;
p_mngr = p_hwfn->p_cxt_mngr; p_mngr = p_hwfn->p_cxt_mngr;
...@@ -1033,7 +1035,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, ...@@ -1033,7 +1035,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 lines, line, sz_left, lines_to_skip = 0; u32 lines, line, sz_left, lines_to_skip = 0;
/* Special handling for RoCE that supports dynamic allocation */ /* Special handling for RoCE that supports dynamic allocation */
if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) && if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM)) ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
return 0; return 0;
...@@ -1833,7 +1835,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) ...@@ -1833,7 +1835,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
tm_offset += tm_iids.pf_tids[i]; tm_offset += tm_iids.pf_tids[i];
} }
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) if (QED_IS_RDMA_PERSONALITY(p_hwfn))
active_seg_mask = 0; active_seg_mask = 0;
STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask); STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
...@@ -2068,6 +2070,11 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, ...@@ -2068,6 +2070,11 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs); num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
switch (p_hwfn->hw_info.personality) { switch (p_hwfn->hw_info.personality) {
case QED_PCI_ETH_IWARP:
/* Each QP requires one connection */
num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
proto = PROTOCOLID_IWARP;
break;
case QED_PCI_ETH_ROCE: case QED_PCI_ETH_ROCE:
num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps); num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
num_cons = num_qps * 2; /* each QP requires two connections */ num_cons = num_qps * 2; /* each QP requires two connections */
...@@ -2103,6 +2110,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) ...@@ -2103,6 +2110,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
switch (p_hwfn->hw_info.personality) { switch (p_hwfn->hw_info.personality) {
case QED_PCI_ETH_RDMA:
case QED_PCI_ETH_IWARP:
case QED_PCI_ETH_ROCE: case QED_PCI_ETH_ROCE:
{ {
qed_rdma_set_pf_params(p_hwfn, qed_rdma_set_pf_params(p_hwfn,
...@@ -2344,7 +2353,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, ...@@ -2344,7 +2353,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
last_cid_allocated - 1); last_cid_allocated - 1);
if (!p_hwfn->b_rdma_enabled_in_prs) { if (!p_hwfn->b_rdma_enabled_in_prs) {
/* Enable RoCE search */ /* Enable RDMA search */
qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
p_hwfn->b_rdma_enabled_in_prs = true; p_hwfn->b_rdma_enabled_in_prs = true;
} }
......
...@@ -216,6 +216,10 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) ...@@ -216,6 +216,10 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
case QED_PCI_ETH_ROCE: case QED_PCI_ETH_ROCE:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
break; break;
case QED_PCI_ETH_IWARP:
flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
PQ_FLAGS_OFLD;
break;
default: default:
DP_ERR(p_hwfn, DP_ERR(p_hwfn,
"unknown personality %d\n", p_hwfn->hw_info.personality); "unknown personality %d\n", p_hwfn->hw_info.personality);
...@@ -936,9 +940,16 @@ int qed_resc_alloc(struct qed_dev *cdev) ...@@ -936,9 +940,16 @@ int qed_resc_alloc(struct qed_dev *cdev)
/* EQ */ /* EQ */
n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
enum protocol_type rdma_proto;
if (QED_IS_ROCE_PERSONALITY(p_hwfn))
rdma_proto = PROTOCOLID_ROCE;
else
rdma_proto = PROTOCOLID_IWARP;
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ROCE, rdma_proto,
NULL) * 2; NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
...@@ -2057,7 +2068,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -2057,7 +2068,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
qed_int_get_num_sbs(p_hwfn, &sb_cnt); qed_int_get_num_sbs(p_hwfn, &sb_cnt);
if (IS_ENABLED(CONFIG_QED_RDMA) && if (IS_ENABLED(CONFIG_QED_RDMA) &&
p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { QED_IS_RDMA_PERSONALITY(p_hwfn)) {
/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
* the status blocks equally between L2 / RoCE but with * the status blocks equally between L2 / RoCE but with
* consideration as to how many l2 queues / cnqs we have. * consideration as to how many l2 queues / cnqs we have.
...@@ -2068,9 +2079,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -2068,9 +2079,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
non_l2_sbs = feat_num[QED_RDMA_CNQ]; non_l2_sbs = feat_num[QED_RDMA_CNQ];
} }
if (QED_IS_L2_PERSONALITY(p_hwfn)) {
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
p_hwfn->hw_info.personality == QED_PCI_ETH) {
/* Start by allocating VF queues, then PF's */ /* Start by allocating VF queues, then PF's */
feat_num[QED_VF_L2_QUE] = min_t(u32, feat_num[QED_VF_L2_QUE] = min_t(u32,
RESC_NUM(p_hwfn, QED_L2_QUEUE), RESC_NUM(p_hwfn, QED_L2_QUEUE),
...@@ -2083,12 +2092,12 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -2083,12 +2092,12 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
QED_VF_L2_QUE)); QED_VF_L2_QUE));
} }
if (p_hwfn->hw_info.personality == QED_PCI_FCOE) if (QED_IS_FCOE_PERSONALITY(p_hwfn))
feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn, RESC_NUM(p_hwfn,
QED_CMDQS_CQS)); QED_CMDQS_CQS));
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn, RESC_NUM(p_hwfn,
QED_CMDQS_CQS)); QED_CMDQS_CQS));
...@@ -4122,3 +4131,14 @@ int qed_device_get_port_id(struct qed_dev *cdev) ...@@ -4122,3 +4131,14 @@ int qed_device_get_port_id(struct qed_dev *cdev)
{ {
return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
} }
void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
{
((u8 *)fw_msb)[0] = mac[1];
((u8 *)fw_msb)[1] = mac[0];
((u8 *)fw_mid)[0] = mac[3];
((u8 *)fw_mid)[1] = mac[2];
((u8 *)fw_lsb)[0] = mac[5];
((u8 *)fw_lsb)[1] = mac[4];
}
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
#include <linux/qed/fcoe_common.h> #include <linux/qed/fcoe_common.h>
#include <linux/qed/eth_common.h> #include <linux/qed/eth_common.h>
#include <linux/qed/iscsi_common.h> #include <linux/qed/iscsi_common.h>
#include <linux/qed/iwarp_common.h>
#include <linux/qed/rdma_common.h> #include <linux/qed/rdma_common.h>
#include <linux/qed/roce_common.h> #include <linux/qed/roce_common.h>
#include <linux/qed/qed_fcoe_if.h> #include <linux/qed/qed_fcoe_if.h>
......
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
/* Default ORD/IRD (outgoing/incoming RDMA read depth) advertised per QP */
#define QED_IWARP_ORD_DEFAULT 32
#define QED_IWARP_IRD_DEFAULT 32
/* Largest MSS the firmware accepts for an offloaded connection */
#define QED_IWARP_MAX_FW_MSS 4120
/* Magic value stamped into ep->sig for sanity checking */
#define QED_EP_SIG 0xecabcdef
/* MPA v2 enhanced-negotiation header carried at the start of private data */
struct mpa_v2_hdr {
	__be16 ird;
	__be16 ord;
};
/* Flag bits embedded in the MPA v2 ird/ord fields */
#define MPA_V2_PEER2PEER_MODEL 0x8000
#define MPA_V2_SEND_RTR 0x4000 /* on ird */
#define MPA_V2_READ_RTR 0x4000 /* on ord */
#define MPA_V2_WRITE_RTR 0x8000
#define MPA_V2_IRD_ORD_MASK 0x3FFF
/* True when the MPA revision is the enhanced (v2) negotiation type */
#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
/* Sentinel meaning "no TCP cid currently allocated" */
#define QED_IWARP_INVALID_TCP_CID 0xffffffff
/* TCP receive window sizing: default and minimum */
#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
#define TIMESTAMP_HEADER_SIZE (12)
/* Bits of iwarp tcp_flags: TCP timestamps / delayed-ack enable */
#define QED_IWARP_TS_EN BIT(0)
#define QED_IWARP_DA_EN BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED (1)
#define QED_IWARP_PARAM_P2P (1)
/* Forward declaration: EQ async event handler for iWARP (defined later) */
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code);
/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
dev->max_qp = min_t(u32,
IWARP_MAX_QPS,
p_hwfn->p_rdma_info->num_qps) -
QED_IWARP_PREALLOC_CNT;
dev->max_cq = dev->max_qp;
dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
p_hwfn->b_rdma_enabled_in_prs = true;
}
/* We have two cid maps, one for tcp which should be used only from passive
 * syn processing and replacing a pre-allocated ep in the list. The second
 * for active tcp and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
	struct qed_bmap *bmap;
	u32 rel_cid;

	/* Convert the absolute cid back to a protocol-relative index */
	rel_cid = cid - qed_cxt_get_proto_cid_start(p_hwfn,
						    p_hwfn->p_rdma_info->proto);

	/* The low cids are the preallocated passive-tcp ones */
	bmap = (rel_cid < QED_IWARP_PREALLOC_CNT) ?
	       &p_hwfn->p_rdma_info->tcp_cid_map :
	       &p_hwfn->p_rdma_info->cid_map;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, bmap, rel_cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
/* Allocate a cid from the QP/active-tcp map and back it with a dynamically
 * allocated ILT context page. On ILT failure the cid is released again.
 */
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
	} else {
		/* Convert the relative bitmap index into an absolute cid */
		*cid += qed_cxt_get_proto_cid_start(p_hwfn,
						    p_hwfn->p_rdma_info->proto);

		rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
		if (rc)
			qed_iwarp_cid_cleaned(p_hwfn, *cid);
	}

	return rc;
}
/* Mark a preallocated passive-tcp cid as in-use in the tcp cid map */
static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	u32 rel_cid;

	rel_cid = cid - qed_cxt_get_proto_cid_start(p_hwfn,
						    p_hwfn->p_rdma_info->proto);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, rel_cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
/* This function allocates a cid for passive tcp (called from syn receive)
 * the reason it's separate from the regular cid allocation is because it
 * is assured that these cids already have ilt allocated. They are preallocated
 * to ensure that we won't need to allocate memory during syn processing
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	if (!rc) {
		/* Convert the relative bitmap index into an absolute cid */
		*cid += qed_cxt_get_proto_cid_start(p_hwfn,
						    p_hwfn->p_rdma_info->proto);
		return 0;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "can't allocate iwarp tcp cid max-count=%d\n",
		   p_hwfn->p_rdma_info->tcp_cid_map.max_count);

	*cid = QED_IWARP_INVALID_TCP_CID;

	return rc;
}
/* Create an iWARP QP: allocate the shared queue DMA page and a cid, then
 * post a blocking CREATE_QP ramrod to the FW.
 *
 * @qp:		driver QP; icid and shared-queue fields are filled in here
 * @out_params:	receives the virt/phys addresses of the SQ/RQ PBL regions
 *		carved out of the shared queue page
 *
 * Returns 0 on success; on failure every acquired resource is released.
 */
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
			struct qed_rdma_qp *qp,
			struct qed_rdma_create_qp_out_params *out_params)
{
	struct iwarp_create_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u16 physical_queue;
	u32 cid;
	int rc;

	/* Single DMA-coherent page shared with FW, holding the SQ/RQ PBLs */
	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      IWARP_SHARED_QUEUE_PAGE_SIZE,
					      &qp->shared_queue_phys_addr,
					      GFP_KERNEL);
	if (!qp->shared_queue)
		return -ENOMEM;

	/* Hand the PBL regions inside that page back to the caller */
	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		goto err1;

	qp->icid = (u16)cid;

	/* Blocking SPQ request on the freshly allocated cid */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.cid = qp->icid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_CREATE_QP,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		goto err2;

	p_ramrod = &p_ent->ramrod.iwarp_create_qp;

	/* Mirror the QP capability flags into the ramrod */
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	p_ramrod->pd = qp->pd;
	p_ramrod->sq_num_pages = qp->sq_num_pages;
	p_ramrod->rq_num_pages = qp->rq_num_pages;

	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

	/* CQ cids combine the opaque fid (high 16 bits) with the cq id */
	p_ramrod->cq_cid_for_sq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
	p_ramrod->cq_cid_for_rq =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	/* Physical queues: offload traffic (q0) and acks (q1) */
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err2;

	return rc;

err2:
	/* Unwind in reverse order of acquisition */
	qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  IWARP_SHARED_QUEUE_PAGE_SIZE,
			  qp->shared_queue, qp->shared_queue_phys_addr);

	return rc;
}
/* Post a blocking MODIFY_QP ramrod that moves the FW QP to CLOSING or
 * ERROR, derived from the driver's current iwarp_state.
 */
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct iwarp_modify_qp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
		  0x1);

	/* Only the CLOSING and ERROR transitions are sent to the FW */
	p_ramrod->transition_to_state =
	    (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) ?
	    IWARP_MODIFY_QP_STATE_CLOSING : IWARP_MODIFY_QP_STATE_ERROR;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);

	return rc;
}
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
switch (state) {
case QED_ROCE_QP_STATE_RESET:
case QED_ROCE_QP_STATE_INIT:
case QED_ROCE_QP_STATE_RTR:
return QED_IWARP_QP_STATE_IDLE;
case QED_ROCE_QP_STATE_RTS:
return QED_IWARP_QP_STATE_RTS;
case QED_ROCE_QP_STATE_SQD:
return QED_IWARP_QP_STATE_CLOSING;
case QED_ROCE_QP_STATE_ERR:
return QED_IWARP_QP_STATE_ERROR;
case QED_ROCE_QP_STATE_SQE:
return QED_IWARP_QP_STATE_TERMINATE;
default:
return QED_IWARP_QP_STATE_ERROR;
}
}
static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
switch (state) {
case QED_IWARP_QP_STATE_IDLE:
return QED_ROCE_QP_STATE_INIT;
case QED_IWARP_QP_STATE_RTS:
return QED_ROCE_QP_STATE_RTS;
case QED_IWARP_QP_STATE_TERMINATE:
return QED_ROCE_QP_STATE_SQE;
case QED_IWARP_QP_STATE_CLOSING:
return QED_ROCE_QP_STATE_SQD;
case QED_IWARP_QP_STATE_ERROR:
return QED_ROCE_QP_STATE_ERR;
default:
return QED_ROCE_QP_STATE_ERR;
}
}
/* Printable names for enum qed_iwarp_qp_state, indexed by state value;
 * used only by the debug prints in qed_iwarp_modify_qp(). Made static and
 * fully const: the table is file-local and must never be written to.
 * Keep the order in sync with the enum (IDLE, RTS, TERMINATE, CLOSING,
 * ERROR).
 */
static const char * const iwarp_state_names[] = {
	"IDLE",
	"RTS",
	"TERMINATE",
	"CLOSING",
	"ERROR",
};
/* Drive the software iWARP QP state machine.
 *
 * @new_state:	requested target state
 * @internal:	true when the transition originates from async FW events
 *		(RST/FIN...); such transitions never post a MODIFY_QP
 *		ramrod back to the FW
 *
 * Returns 0 on success, -EINVAL for an illegal transition out of ERROR,
 * or the MODIFY_QP ramrod status when one is sent.
 */
int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_qp *qp,
		    enum qed_iwarp_qp_state new_state, bool internal)
{
	enum qed_iwarp_qp_state prev_iw_state;
	bool modify_fw = false;
	int rc = 0;

	/* modify QP can be called from upper-layer or as a result of async
	 * RST/FIN... therefore need to protect
	 */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
	prev_iw_state = qp->iwarp_state;

	/* No-op transition: nothing to do */
	if (prev_iw_state == new_state) {
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
		return 0;
	}

	switch (prev_iw_state) {
	case QED_IWARP_QP_STATE_IDLE:
		switch (new_state) {
		case QED_IWARP_QP_STATE_RTS:
			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			/* Externally requested errors must reach the FW too */
			if (!internal)
				modify_fw = true;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_RTS:
		switch (new_state) {
		case QED_IWARP_QP_STATE_CLOSING:
			if (!internal)
				modify_fw = true;

			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
			break;
		case QED_IWARP_QP_STATE_ERROR:
			if (!internal)
				modify_fw = true;
			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
			break;
		default:
			break;
		}
		break;
	case QED_IWARP_QP_STATE_ERROR:
		switch (new_state) {
		case QED_IWARP_QP_STATE_IDLE:
			qp->iwarp_state = new_state;
			break;
		case QED_IWARP_QP_STATE_CLOSING:
			/* could happen due to race... do nothing.... */
			break;
		default:
			/* Any other transition out of ERROR is illegal */
			rc = -EINVAL;
		}
		break;
	case QED_IWARP_QP_STATE_TERMINATE:
	case QED_IWARP_QP_STATE_CLOSING:
		qp->iwarp_state = new_state;
		break;
	default:
		break;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
		   qp->icid,
		   iwarp_state_names[prev_iw_state],
		   iwarp_state_names[qp->iwarp_state],
		   internal ? "internal" : "");

	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

	/* Ramrod posted outside the lock; it blocks on completion */
	if (modify_fw)
		rc = qed_iwarp_modify_fw(p_hwfn, qp);

	return rc;
}
/* Post a blocking DESTROY_QP ramrod tearing down the FW side of a QP */
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	struct qed_spq_entry *p_ent;
	struct qed_sp_init_data init_data;
	int rc;

	/* Build a blocking SPQ request against the QP's cid */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

	return rc;
}
/* Release an endpoint: optionally unlink it from the active-ep list, free
 * its DMA buffer, detach it from its QP and free the ep object itself.
 */
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
				 struct qed_iwarp_ep *ep,
				 bool remove_from_active_list)
{
	if (remove_from_active_list) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(*ep->ep_buffer_virt),
			  ep->ep_buffer_virt, ep->ep_buffer_phys);

	/* Break the QP -> ep back-reference before freeing */
	if (ep->qp)
		ep->qp->ep = NULL;

	kfree(ep);
}
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
struct qed_iwarp_ep *ep = qp->ep;
int wait_count = 0;
int rc = 0;
if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
rc = qed_iwarp_modify_qp(p_hwfn, qp,
QED_IWARP_QP_STATE_ERROR, false);
if (rc)
return rc;
}
/* Make sure ep is closed before returning and freeing memory. */
if (ep) {
while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
msleep(100);
if (ep->state != QED_IWARP_EP_CLOSED)
DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
ep->state);
qed_iwarp_destroy_ep(p_hwfn, ep, false);
}
rc = qed_iwarp_fw_destroy(p_hwfn, qp);
if (qp->shared_queue)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
IWARP_SHARED_QUEUE_PAGE_SIZE,
qp->shared_queue, qp->shared_queue_phys_addr);
return rc;
}
/* Allocate and initialize an endpoint object together with its
 * DMA-coherent buffer. On success *ep_out points to the new ep.
 *
 * Returns 0 or -ENOMEM.
 */
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
	struct qed_iwarp_ep *ep;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->state = QED_IWARP_EP_INIT;

	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
						sizeof(*ep->ep_buffer_virt),
						&ep->ep_buffer_phys,
						GFP_KERNEL);
	if (!ep->ep_buffer_virt) {
		kfree(ep);
		return -ENOMEM;
	}

	/* Stamp the magic used for endpoint sanity checks */
	ep->sig = QED_EP_SIG;

	*ep_out = ep;

	return 0;
}
/* Dump the contents of a TCP offload ramrod to the RDMA debug log.
 *
 * Fix: the IPv4 branch's format string was missing the ':' separator
 * between remote_ip and the port ("%pI4h%x"), inconsistent with both the
 * local_ip field and the IPv6 branch.
 */
static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
		   p_tcp_ramrod->tcp.local_mac_addr_lo,
		   p_tcp_ramrod->tcp.local_mac_addr_mid,
		   p_tcp_ramrod->tcp.local_mac_addr_hi,
		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
		   p_tcp_ramrod->tcp.remote_mac_addr_hi);

	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n",
			   p_tcp_ramrod->tcp.local_ip,
			   p_tcp_ramrod->tcp.local_port,
			   p_tcp_ramrod->tcp.remote_ip,
			   p_tcp_ramrod->tcp.remote_port,
			   p_tcp_ramrod->tcp.vlan_id);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
		   p_tcp_ramrod->tcp.flow_label,
		   p_tcp_ramrod->tcp.ttl,
		   p_tcp_ramrod->tcp.tos_or_tc,
		   p_tcp_ramrod->tcp.mss,
		   p_tcp_ramrod->tcp.rcv_wnd_scale,
		   p_tcp_ramrod->tcp.connect_mode,
		   p_tcp_ramrod->tcp.flags);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
		   p_tcp_ramrod->tcp.syn_ip_payload_length,
		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}
/* Post a TCP_OFFLOAD ramrod handing an endpoint's TCP connection (MACs,
 * 4-tuple, TCP parameters and the received SYN for the passive side) to
 * the FW. Passive connections complete via SPQ callback; active ones
 * block until completion.
 */
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
	struct tcp_offload_params_opt2 *tcp;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t async_output_phys;
	dma_addr_t in_pdata_phys;
	u16 physical_q;
	u8 tcp_flags;
	int rc;
	int i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = ep->tcp_cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	/* Passive side is driven by async events -> callback completion */
	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

	/* Region of the ep buffer receiving incoming ULP private data */
	in_pdata_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, in_pdata);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
		       in_pdata_phys);

	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

	/* Region the FW fills when raising async EQEs for this ep */
	async_output_phys = ep->ep_buffer_phys +
	    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
		       async_output_phys);

	/* Opaque ep pointer, echoed back to us in async events */
	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

	tcp = &p_tcp_ramrod->tcp;
	/* FW stores each MAC as three 16-bit words (see qed_set_fw_mac_addr) */
	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
			    &tcp->remote_mac_addr_mid,
			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
			    &tcp->local_mac_addr_lo, ep->local_mac_addr);

	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

	/* Translate driver tcp_flags into ramrod flag bits */
	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
	tcp->flags = 0;
	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
		  !!(tcp_flags & QED_IWARP_TS_EN));

	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
		  !!(tcp_flags & QED_IWARP_DA_EN));

	tcp->ip_version = ep->cm_info.ip_version;

	/* Copy all four IP words; for IPv4 only the first is meaningful */
	for (i = 0; i < 4; i++) {
		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
	}

	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
	tcp->mss = cpu_to_le16(ep->mss);
	tcp->flow_label = 0;
	tcp->ttl = 0x40;
	tcp->tos_or_tc = 0;

	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
	tcp->connect_mode = ep->connect_mode;

	/* Passive side also hands the received SYN packet to the FW */
	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		tcp->syn_ip_payload_length =
			cpu_to_le16(ep->syn_ip_payload_length);
		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
	}

	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

	return rc;
}
/* Handle an incoming MPA request on a passive-side connection.
 *
 * Parses the firmware's async output (and, for enhanced MPA negotiation,
 * the MPA v2 header at the start of the received private data), records
 * the peer's requested ord/ird and RTR capabilities on the ep, and
 * delivers a QED_IWARP_EVENT_MPA_REQUEST event to the upper layer.
 */
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	struct qed_iwarp_cm_event_params params;
	struct mpa_v2_hdr *mpa_v2;
	union async_output *async_data;
	u16 mpa_ord, mpa_ird;
	u8 mpa_hdr_size = 0;
	u8 mpa_rev;

	/* Firmware DMAs the parsed MPA request into the ep's buffer */
	async_data = &ep->ep_buffer_virt->async_output;

	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
		   async_data->mpa_request.ulp_data_len,
		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Read ord/ird values from private data buffer */
		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
		mpa_hdr_size = sizeof(*mpa_v2);

		mpa_ord = ntohs(mpa_v2->ord);
		mpa_ird = ntohs(mpa_v2->ird);

		/* Temporarily store in cm_info incoming ord/ird requested,
		 * later replace with negotiated value during accept
		 */
		ep->cm_info.ord = (u8)min_t(u16,
					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_ORD_DEFAULT);

		ep->cm_info.ird = (u8)min_t(u16,
					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
					    QED_IWARP_IRD_DEFAULT);

		/* Peer2Peer negotiation */
		ep->rtr_type = MPA_RTR_TYPE_NONE;
		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
			if (mpa_ord & MPA_V2_WRITE_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

			if (mpa_ord & MPA_V2_READ_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

			if (mpa_ird & MPA_V2_SEND_RTR)
				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

			/* Keep only RTR types we also support */
			ep->rtr_type &= iwarp_info->rtr_type;

			/* if we're left with no match send our capabilities */
			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
				ep->rtr_type = iwarp_info->rtr_type;
		}

		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
	} else {
		/* Basic MPA: no header to parse, use local defaults */
		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

	/* Strip mpa v2 hdr from private data before sending to upper layer */
	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
				       mpa_hdr_size;

	params.event = QED_IWARP_EVENT_MPA_REQUEST;
	params.cm_info = &ep->cm_info;
	params.ep_context = ep;
	params.status = 0;

	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
	ep->event_cb(ep->cb_context, &params);
}
/* Post an MPA_OFFLOAD ramrod for an ep.
 *
 * Used both to progress a live connection (ep->qp set) and to send an MPA
 * reject (ep->qp == NULL). Active-side offloads complete asynchronously
 * through the SPQ callback; passive-side offloads block (EBLOCK) until
 * firmware acknowledges.
 *
 * Returns 0 on success or a negative error code from SPQ handling.
 */
static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t async_output_phys;
	struct qed_spq_entry *p_ent;
	dma_addr_t out_pdata_phys;
	dma_addr_t in_pdata_phys;
	struct qed_rdma_qp *qp;
	bool reject;
	int rc;

	if (!ep)
		return -EINVAL;

	qp = ep->qp;
	reject = !qp;	/* no QP attached means this offload is a reject */

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = reject ? ep->tcp_cid : qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		init_data.comp_mode = QED_SPQ_MODE_CB;
	else
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;

	/* Outgoing private data lives in the ep's DMA-coherent buffer */
	out_pdata_phys = ep->ep_buffer_phys +
			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
		       out_pdata_phys);
	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
	    ep->cm_info.private_data_len;
	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;

	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;

	in_pdata_phys = ep->ep_buffer_phys +
			offsetof(struct qed_iwarp_ep_memory, in_pdata);
	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
		       in_pdata_phys);
	p_mpa_ramrod->incoming_ulp_buffer.len =
	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
	async_output_phys = ep->ep_buffer_phys +
			    offsetof(struct qed_iwarp_ep_memory, async_output);
	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
		       async_output_phys);
	/* The ep pointer is echoed back by firmware in async events */
	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

	if (!reject) {
		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
			       qp->shared_queue_phys_addr);
		p_mpa_ramrod->stats_counter_id =
		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
	} else {
		p_mpa_ramrod->common.reject = 1;
	}

	p_mpa_ramrod->mode = ep->mpa_rev;
	SET_FIELD(p_mpa_ramrod->rtr_pref,
		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (!reject)
		ep->cid = qp->icid;	/* Now they're migrated. */

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
		   reject ? 0xffff : qp->icid,
		   ep->tcp_cid,
		   rc,
		   ep->cm_info.ird,
		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);

	return rc;
}
/* Reset an ep to its initial state and move it back to the free-ep pool.
 *
 * Detaches the ep from its QP (if any) and clears the cm_info. If the
 * ep's pre-acquired tcp cid was consumed (marked invalid), try to acquire
 * a new one now; failure is tolerated — allocation is then deferred to
 * qed_iwarp_get_free_ep().
 */
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	ep->state = QED_IWARP_EP_INIT;
	if (ep->qp)
		ep->qp->ep = NULL;
	ep->qp = NULL;
	memset(&ep->cm_info, 0, sizeof(ep->cm_info));

	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		/* We don't care about the return code, it's ok if tcp_cid
		 * remains invalid...in this case we'll defer allocation
		 */
		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
	}

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	/* list_move_tail() is the idiomatic equivalent of the former
	 * list_del() + list_add_tail() pair.
	 */
	list_move_tail(&ep->list_entry,
		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}
/* Parse the private data of an MPA response (active side).
 *
 * When MPA rev 2 is in use, strip the MPA v2 header and record the
 * negotiated ord/ird; the remaining bytes are exposed to the upper layer
 * through ep->cm_info.
 */
void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	struct mpa_v2_hdr *mpa_v2_params;
	union async_output *async_data;
	u16 mpa_ird, mpa_ord;
	u8 mpa_data_size = 0;

	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
		mpa_v2_params =
		    (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
		mpa_data_size = sizeof(*mpa_v2_params);
		mpa_ird = ntohs(mpa_v2_params->ird);
		mpa_ord = ntohs(mpa_v2_params->ord);

		/* The peer's ord/ird are deliberately crossed over into our
		 * ird/ord (what the remote issues bounds what we accept, and
		 * vice versa) — per MPA v2 negotiation semantics (RFC 6581).
		 */
		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
	}

	/* Firmware-provided metadata for the received MPA response */
	async_data = &ep->ep_buffer_virt->async_output;

	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
				       mpa_data_size;
}
void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
struct qed_iwarp_cm_event_params params;
if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
DP_NOTICE(p_hwfn,
"MPA reply event not expected on passive side!\n");
return;
}
params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
qed_iwarp_parse_private_data(p_hwfn, ep);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
params.cm_info = &ep->cm_info;
params.ep_context = ep;
params.status = 0;
ep->mpa_reply_processed = true;
ep->event_cb(ep->cb_context, &params);
}
/* Human-readable connection side for log messages. The whole expansion is
 * parenthesized so the ternary cannot bind unexpectedly at the use site.
 */
#define QED_IWARP_CONNECT_MODE_STRING(ep) \
	(((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active")
/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 *
 * Translates the firmware completion code into an upper-layer event
 * (ACTIVE_COMPLETE / PASSIVE_COMPLETE) with an errno-style status. On
 * success the QP is moved to RTS with the negotiated ord/ird; on failure
 * the ep is either returned to the free pool (passive reject) or removed
 * from the in-use list.
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	else
		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

	/* Active side may complete before an MPA reply event was seen —
	 * parse the private data here in that case.
	 */
	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
		qed_iwarp_parse_private_data(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

	params.cm_info = &ep->cm_info;

	params.ep_context = ep;

	/* Default to CLOSED; overwritten below on success */
	ep->state = QED_IWARP_EP_CLOSED;

	switch (fw_return_code) {
	case RDMA_RETURN_OK:
		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
		ep->state = QED_IWARP_EP_ESTABLISHED;
		params.status = 0;
		break;
	case IWARP_CONN_ERROR_MPA_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RST:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
			  ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_MPA_FIN:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_TERMINATE:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
		params.status = -ECONNREFUSED;
		break;
	default:
		params.status = -ECONNRESET;
		break;
	}

	ep->event_cb(ep->cb_context, &params);

	/* on passive side, if there is no associated QP (REJECT) we need to
	 * return the ep to the pool, (in the regular case we add an element
	 * in accept instead of this one.
	 * In both cases we need to remove it from the ep_list.
	 */
	if (fw_return_code != RDMA_RETURN_OK) {
		/* Mark the pre-allocated cid consumed; return_ep may defer
		 * re-acquiring one.
		 */
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
		    (!ep->qp)) {	/* Rejected */
			qed_iwarp_return_ep(p_hwfn, ep);
		} else {
			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			list_del(&ep->list_entry);
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		}
	}
}
/* Build the MPA v2 header (ird/ord plus peer-to-peer RTR bits) in the
 * ep's outgoing private-data buffer. *mpa_data_size receives the header
 * size, or 0 when MPA rev 2 is not in use.
 */
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
	struct mpa_v2_hdr *hdr;
	u16 ird, ord;

	*mpa_data_size = 0;
	if (!MPA_REV2(ep->mpa_rev))
		return;

	hdr = (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
	*mpa_data_size = sizeof(*hdr);

	ird = (u16)ep->cm_info.ird;
	ord = (u16)ep->cm_info.ord;

	if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
		ird |= MPA_V2_PEER2PEER_MODEL;

		if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
			ird |= MPA_V2_SEND_RTR;

		if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
			ord |= MPA_V2_WRITE_RTR;

		if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
			ord |= MPA_V2_READ_RTR;
	}

	hdr->ird = htons(ird);
	hdr->ord = htons(ord);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
		   hdr->ird,
		   hdr->ord,
		   *((u32 *)hdr),
		   ord & MPA_V2_IRD_ORD_MASK,
		   ird & MPA_V2_IRD_ORD_MASK,
		   !!(ird & MPA_V2_PEER2PEER_MODEL),
		   !!(ird & MPA_V2_SEND_RTR),
		   !!(ord & MPA_V2_WRITE_RTR),
		   !!(ord & MPA_V2_READ_RTR));
}
/* Initiate an active-side iWARP connection.
 *
 * @rdma_cxt:	qed hwfn handle (opaque to the caller)
 * @iparams:	connection parameters (QP, cm_info, private data, callbacks)
 * @oparams:	receives the ep context for the new connection
 *
 * Allocates a cid and ep, links them to the QP, prepares the outgoing MPA
 * private data (with MPA v2 header if negotiated) and fires the TCP
 * offload ramrod. Returns 0 on success, negative errno otherwise; on
 * failure the cid is released.
 */
int qed_iwarp_connect(void *rdma_cxt,
		      struct qed_iwarp_connect_in *iparams,
		      struct qed_iwarp_connect_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_info *iwarp_info;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	u8 ts_hdr_size = 0;
	u32 cid;
	int rc;

	/* Reject ord/ird beyond what the device advertises */
	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
		DP_NOTICE(p_hwfn,
			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			  iparams->qp->icid, iparams->cm_info.ord,
			  iparams->cm_info.ird);

		return -EINVAL;
	}

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;

	/* Allocate ep object */
	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
	if (rc)
		return rc;

	rc = qed_iwarp_create_ep(p_hwfn, &ep);
	if (rc)
		goto err;

	ep->tcp_cid = cid;

	/* Track the ep on the in-use list */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->qp = iparams->qp;
	ep->qp->ep = ep;
	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

	ep->cm_info.ord = iparams->cm_info.ord;
	ep->cm_info.ird = iparams->cm_info.ird;

	ep->rtr_type = iwarp_info->rtr_type;
	if (!iwarp_info->peer2peer)
		ep->rtr_type = MPA_RTR_TYPE_NONE;

	/* Zero-read RTR requires at least one outstanding read */
	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
		ep->cm_info.ord = 1;

	ep->mpa_rev = iwarp_info->mpa_rev;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	/* Upper layer's private data is copied after the MPA v2 header */
	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->cm_info.private_data,
	       iparams->cm_info.private_data_len);

	/* TCP timestamps consume header space, reducing the usable MSS */
	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
		ts_hdr_size = TIMESTAMP_HEADER_SIZE;

	ep->mss = iparams->mss - ts_hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = iparams->event_cb;
	ep->cb_context = iparams->cb_context;
	ep->connect_mode = TCP_CONNECT_ACTIVE;

	oparams->ep_context = ep;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
		   iparams->qp->icid, ep->tcp_cid, rc);

	if (rc) {
		/* destroy_ep also unlinks the ep; the cid is released below */
		qed_iwarp_destroy_ep(p_hwfn, ep, true);
		goto err;
	}

	return rc;
err:
	qed_iwarp_cid_cleaned(p_hwfn, cid);

	return rc;
}
/* Pop an ep from the pre-allocated free list (under iw_lock).
 *
 * If the chosen ep's deferred tcp-cid allocation never succeeded, retry
 * it here; on failure NULL is returned. Returns NULL as well when the
 * pool is exhausted.
 */
static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep = NULL;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
		DP_ERR(p_hwfn, "Ep list is empty\n");
		goto out;
	}

	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
			      struct qed_iwarp_ep, list_entry);

	/* in some cases we could have failed allocating a tcp cid when added
	 * from accept / failure... retry now..this is not the common case.
	 */
	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);

		/* if we fail we could look for another entry with a valid
		 * tcp_cid, but since we don't expect to reach this anyway
		 * it's not worth the handling
		 */
		if (rc) {
			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
			ep = NULL;
			goto out;
		}
	}

	list_del(&ep->list_entry);

out:
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	return ep;
}
#define QED_IWARP_MAX_CID_CLEAN_TIME 100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared, as long as
 * there is progress ( i.e. the number of bits left to be cleared decreases )
 * the function continues.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
	int stall_count = 0;
	int remaining;
	int last_seen;

	remaining = bitmap_weight(bmap->bitmap, bmap->max_count);
	last_seen = remaining;

	while (remaining) {
		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

		remaining = bitmap_weight(bmap->bitmap, bmap->max_count);

		if (remaining != last_seen) {
			/* Progress was made — reset the stall counter */
			last_seen = remaining;
			stall_count = 0;
		} else {
			stall_count++;
		}

		if (stall_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
			DP_NOTICE(p_hwfn,
				  "%s bitmap wait timed out (%d cids pending)\n",
				  bmap->name, remaining);
			return -EBUSY;
		}
	}

	return 0;
}
static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
int rc;
int i;
rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
&p_hwfn->p_rdma_info->tcp_cid_map);
if (rc)
return rc;
/* Now free the tcp cids from the main cid map */
for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
/* Now wait for all cids to be completed */
return qed_iwarp_wait_cid_map_cleared(p_hwfn,
&p_hwfn->p_rdma_info->cid_map);
}
/* Drain and destroy all pre-allocated eps on the free list. Each ep's
 * pre-acquired tcp cid (if any) is released back to the cid maps before
 * the ep memory itself is freed.
 */
static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{
	struct qed_iwarp_ep *ep;

	while (1) {
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		/* Fix: test emptiness and pick the head under iw_lock — the
		 * original checked list_empty() before taking the lock,
		 * leaving a window for the list to change in between. This
		 * also removes the dead NULL check after list_first_entry()
		 * (which never returns NULL).
		 */
		ep = list_first_entry_or_null(
			&p_hwfn->p_rdma_info->iwarp.ep_free_list,
			struct qed_iwarp_ep, list_entry);
		if (!ep) {
			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
			break;
		}
		list_del(&ep->list_entry);

		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);

		qed_iwarp_destroy_ep(p_hwfn, ep, false);
	}
}
/* Pre-allocate eps for the passive-side free pool.
 *
 * @init: true at driver init (allocate QED_IWARP_PREALLOC_CNT eps and
 *	  take their cids from the main cid map, which may allocate ILT);
 *	  false at runtime replenish (one ep, cid taken from the
 *	  pre-acquired tcp-cid bitmap, safe in DPC context).
 *
 * Returns 0 on success, negative errno on allocation failure.
 */
static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{
	struct qed_iwarp_ep *ep;
	int rc = 0;
	int count;
	u32 cid;
	int i;

	count = init ? QED_IWARP_PREALLOC_CNT : 1;
	for (i = 0; i < count; i++) {
		rc = qed_iwarp_create_ep(p_hwfn, &ep);
		if (rc)
			return rc;

		/* During initialization we allocate from the main pool,
		 * afterwards we allocate only from the tcp_cid.
		 */
		if (init) {
			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
			if (rc)
				goto err;
			qed_iwarp_set_tcp_cid(p_hwfn, cid);
		} else {
			/* We don't care about the return code, it's ok if
			 * tcp_cid remains invalid...in this case we'll
			 * defer allocation
			 */
			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
		}

		ep->tcp_cid = cid;

		/* Publish the ep on the free list */
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_add_tail(&ep->list_entry,
			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}

	return rc;

err:
	/* Only the ep that failed cid allocation needs freeing; earlier
	 * iterations are already on the free list.
	 */
	qed_iwarp_destroy_ep(p_hwfn, ep, false);

	return rc;
}
int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
{
int rc;
/* Allocate bitmap for tcp cid. These are used by passive side
* to ensure it can allocate a tcp cid during dpc that was
* pre-acquired and doesn't require dynamic allocation of ilt
*/
rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
QED_IWARP_PREALLOC_CNT, "TCP_CID");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate tcp cid, rc = %d\n", rc);
return rc;
}
INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
return qed_iwarp_prealloc_ep(p_hwfn, true);
}
/* Free the iWARP-specific resources allocated by qed_iwarp_alloc().
 * NOTE(review): the final argument to qed_rdma_bmap_free() presumably
 * requests a leftover-bits sanity check — confirm against its definition.
 */
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
}
/* Accept an incoming MPA connection request on an ep previously handed to
 * the upper layer via QED_IWARP_EVENT_MPA_REQUEST.
 *
 * Negotiates the final ord/ird for enhanced MPA, copies the upper layer's
 * private data after the MPA v2 header, and fires the MPA_OFFLOAD ramrod.
 * On offload failure the QP is moved to ERROR state.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_data_size = 0;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   iparams->qp->icid, ep->tcp_cid);

	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
		/* Fix: the original printed iparams->ord in both slots;
		 * report the actual ird value in the ird slot.
		 */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
			   iparams->qp->icid,
			   ep->tcp_cid, iparams->ord, iparams->ird);
		return -EINVAL;
	}

	/* Replenish the free-ep pool this ep was taken from */
	qed_iwarp_prealloc_ep(p_hwfn, false);

	ep->cb_context = iparams->cb_context;
	ep->qp = iparams->qp;
	ep->qp->ep = ep;

	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
		/* Negotiate ord/ird: if upperlayer requested ord larger than
		 * ird advertised by remote, we need to decrease our ord
		 */
		if (iparams->ord > ep->cm_info.ird)
			iparams->ord = ep->cm_info.ird;

		/* Zero-read RTR requires at least one outstanding read */
		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
		    (iparams->ird == 0))
			iparams->ird = 1;
	}

	/* Update cm_info ord/ird to be negotiated values */
	ep->cm_info.ord = iparams->ord;
	ep->cm_info.ird = iparams->ird;

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_data_size;

	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
	       iparams->private_data, iparams->private_data_len);

	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
	if (rc)
		qed_iwarp_modify_qp(p_hwfn,
				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);

	return rc;
}
/* Reject an incoming MPA connection request. The upper layer's private
 * data (if any) is placed after the MPA v2 header and an MPA_OFFLOAD
 * ramrod is fired with no QP attached, which makes it a reject.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_ep *ep;
	u8 mpa_hdr_size = 0;
	u8 *pdata_dst;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);

	ep->cb_context = iparams->cb_context;
	ep->qp = NULL;	/* no QP — the offload below becomes a reject */

	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_hdr_size);

	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
	ep->cm_info.private_data_len = iparams->private_data_len +
				       mpa_hdr_size;

	pdata_dst = (u8 *)ep->ep_buffer_virt->out_pdata + mpa_hdr_size;
	memcpy(pdata_dst, iparams->private_data, iparams->private_data_len);

	return qed_iwarp_mpa_offload(p_hwfn, ep);
}
/* Debug helper: dump a connection's 4-tuple, vlan, private data length
 * and ord/ird, choosing the IPv4 or IPv6 format by cm_info->ip_version.
 */
static void
qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
			struct qed_iwarp_cm_info *cm_info)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
		   cm_info->ip_version);

	if (cm_info->ip_version == QED_TCP_IPV4)
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);
	else
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "remote_ip %pI6h:%x, local_ip %pI6h:%x vlan=%x\n",
			   cm_info->remote_ip, cm_info->remote_port,
			   cm_info->local_ip, cm_info->local_port,
			   cm_info->vlan);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "private_data_len = %x ord = %d, ird = %d\n",
		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
}
/* Repost an rx buffer to the given ll2 connection. On failure the buffer
 * is no longer owned by ll2, so its DMA memory and tracking struct are
 * released here. Returns the qed_ll2_post_rx_buffer() result.
 */
static int
qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
		      struct qed_iwarp_ll2_buff *buf, u8 handle)
{
	int rc;

	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
				    (u16)buf->buff_size, buf, 1);
	if (!rc)
		return 0;

	DP_NOTICE(p_hwfn,
		  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
		  rc, handle);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
			  buf->data, buf->data_phys_addr);
	kfree(buf);

	return rc;
}
/* Check whether an ep already exists for the given 4-tuple + vlan (e.g. a
 * SYN retransmit for a connection that is already being established).
 * Returns true (and logs) when a match is found on the in-use ep list.
 */
static bool
qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
{
	struct qed_iwarp_ep *ep;

	list_for_each_entry(ep,
			    &p_hwfn->p_rdma_info->iwarp.ep_list,
			    list_entry) {
		if (ep->cm_info.local_port != cm_info->local_port ||
		    ep->cm_info.remote_port != cm_info->remote_port ||
		    ep->cm_info.vlan != cm_info->vlan)
			continue;

		if (memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
			   sizeof(cm_info->local_ip)) ||
		    memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
			   sizeof(cm_info->remote_ip)))
			continue;

		DP_NOTICE(p_hwfn,
			  "SYN received on active connection - dropping\n");
		qed_iwarp_print_cm_info(p_hwfn, cm_info);

		return true;
	}

	return false;
}
/* Find a listener matching the connection's local port: either a wildcard
 * listener bound to the all-zero address, or one bound to the exact local
 * ip + vlan. Returns the listener, or NULL when none matches.
 */
static struct qed_iwarp_listener *
qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info)
{
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	struct qed_iwarp_listener *listener;
	bool wildcard, exact;

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	list_for_each_entry(listener,
			    &p_hwfn->p_rdma_info->iwarp.listen_list,
			    list_entry) {
		if (listener->port != cm_info->local_port)
			continue;

		/* All-zero listener address matches any local ip */
		wildcard = !memcmp(listener->ip_addr, ip_zero,
				   sizeof(ip_zero));
		exact = !memcmp(listener->ip_addr, cm_info->local_ip,
				sizeof(cm_info->local_ip)) &&
			(listener->vlan == cm_info->vlan);

		if (wildcard || exact) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "listener found = %p\n", listener);
			return listener;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
	return NULL;
}
/* Parse a received SYN packet: extract MAC addresses, ip 4-tuple, vlan
 * and TCP payload length into @cm_info / the output parameters.
 *
 * @buf:			start of the ethernet frame
 * @remote_mac_addr:		receives the peer's MAC (frame source)
 * @local_mac_addr:		receives our MAC (frame destination)
 * @payload_len:		receives the TCP payload length
 * @tcp_start_offset:		receives the offset of the TCP header
 *
 * Returns 0 on success, -EINVAL for an unexpected ethertype or a non-SYN
 * segment.
 */
static int
qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
		       struct qed_iwarp_cm_info *cm_info,
		       void *buf,
		       u8 *remote_mac_addr,
		       u8 *local_mac_addr,
		       int *payload_len, int *tcp_start_offset)
{
	struct vlan_ethhdr *vethh;
	bool vlan_valid = false;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	struct tcphdr *tcph;
	struct iphdr *iph;
	int eth_hlen;
	int ip_hlen;
	int eth_type;
	int i;

	ethh = buf;
	eth_type = ntohs(ethh->h_proto);
	if (eth_type == ETH_P_8021Q) {
		/* Single VLAN tag: record the vid and look at the inner
		 * ethertype
		 */
		vlan_valid = true;
		vethh = (struct vlan_ethhdr *)ethh;
		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
	}

	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);

	ether_addr_copy(remote_mac_addr, ethh->h_source);
	ether_addr_copy(local_mac_addr, ethh->h_dest);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
		   eth_type, ethh->h_source);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
		   eth_hlen, ethh->h_dest);

	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);

	/* NOTE(review): this function stores the firmware enum values
	 * TCP_IPV4/TCP_IPV6, while other code here (qed_iwarp_print_cm_info,
	 * qed_iwarp_ll2_comp_syn_pkt) compares against QED_TCP_IPV4 —
	 * verify the two enums carry the same values.
	 */
	if (eth_type == ETH_P_IP) {
		cm_info->local_ip[0] = ntohl(iph->daddr);
		cm_info->remote_ip[0] = ntohl(iph->saddr);
		cm_info->ip_version = TCP_IPV4;

		ip_hlen = (iph->ihl) * sizeof(u32);
		*payload_len = ntohs(iph->tot_len) - ip_hlen;
	} else if (eth_type == ETH_P_IPV6) {
		ip6h = (struct ipv6hdr *)iph;
		for (i = 0; i < 4; i++) {
			cm_info->local_ip[i] =
			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
			cm_info->remote_ip[i] =
			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
		}
		cm_info->ip_version = TCP_IPV6;

		ip_hlen = sizeof(*ip6h);
		*payload_len = ntohs(ip6h->payload_len);
	} else {
		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
		return -EINVAL;
	}

	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);

	if (!tcph->syn) {
		DP_NOTICE(p_hwfn,
			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
			  iph->ihl, tcph->source, tcph->dest);
		return -EINVAL;
	}

	cm_info->local_port = ntohs(tcph->dest);
	cm_info->remote_port = ntohs(tcph->source);

	qed_iwarp_print_cm_info(p_hwfn, cm_info);

	*tcp_start_offset = eth_hlen + ip_hlen;

	return 0;
}
/* ll2 rx-completion callback for the SYN connection.
 *
 * Validates and parses the received SYN; if a listener matches the
 * 4-tuple + vlan, a free ep is taken and a TCP offload ramrod is fired.
 * Without a listener the SYN is bounced back towards the chip (loopback
 * tx) so a RST can be generated. On any error the buffer is reposted to
 * the rx chain.
 */
static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_iwarp_ll2_buff *buf = data->cookie;
	struct qed_iwarp_listener *listener;
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_iwarp_cm_info cm_info;
	struct qed_hwfn *p_hwfn = cxt;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
	struct qed_iwarp_ep *ep;
	int tcp_start_offset;
	u8 ts_hdr_size = 0;
	u8 ll2_syn_handle;
	int payload_len;
	u32 hdr_size;
	int rc;

	memset(&cm_info, 0, sizeof(cm_info));

	/* Fix: fetch the syn handle before any 'goto err' — the error path
	 * reposts the buffer on this handle, and previously it was read
	 * uninitialized when the checksum or parse checks failed.
	 */
	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

	if (GET_FIELD(data->parse_flags,
		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
		goto err;
	}

	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
				    data->u.placement_offset, remote_mac_addr,
				    local_mac_addr, &payload_len,
				    &tcp_start_offset);
	if (rc)
		goto err;

	/* Check if there is a listener for this 4-tuple+vlan */
	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
	if (!listener) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
			   data->parse_flags, data->length.packet_length);

		/* No listener — loop the SYN back towards the chip so the
		 * TCP stack can answer it with a RST.
		 */
		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = data->vlan;

		if (GET_FIELD(data->parse_flags,
			      PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
			SET_FIELD(tx_pkt.bd_flags,
				  CORE_TX_BD_DATA_VLAN_INSERTION, 1);

		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		tx_pkt.first_frag = buf->data_phys_addr +
				    data->u.placement_offset;
		tx_pkt.first_frag_len = data->length.packet_length;
		tx_pkt.cookie = buf;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
					       &tx_pkt, true);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Can't post SYN back to chip rc=%d\n", rc);
			goto err;
		}
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
	/* There may be an open ep on this connection if this is a syn
	 * retransmit... need to make sure there isn't...
	 */
	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
		goto err;

	ep = qed_iwarp_get_free_ep(p_hwfn);
	if (!ep)
		goto err;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
	ether_addr_copy(ep->local_mac_addr, local_mac_addr);

	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));

	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
		ts_hdr_size = TIMESTAMP_HEADER_SIZE;

	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
		   ts_hdr_size;
	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

	ep->event_cb = listener->event_cb;
	ep->cb_context = listener->cb_context;
	ep->connect_mode = TCP_CONNECT_PASSIVE;

	/* Keep the SYN buffer — the TCP offload ramrod references it */
	ep->syn = buf;
	ep->syn_ip_payload_length = (u16)payload_len;
	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
			   tcp_start_offset;

	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
	if (rc) {
		qed_iwarp_return_ep(p_hwfn, ep);
		goto err;
	}

	return;
err:
	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
}
/* ll2 rx-release callback: ll2 is done with the buffer, free its DMA
 * memory and tracking structure.
 */
static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t rx_buf_addr,
				     bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_iwarp_ll2_buff *buf = cookie;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
			  buf->data, buf->data_phys_addr);
	kfree(buf);
}
/* ll2 tx-completion callback for looped-back SYN packets. */
static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
				      void *cookie, dma_addr_t first_frag_addr,
				      bool b_last_fragment, bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_iwarp_ll2_buff *buf = cookie;

	/* this was originally an rx packet, post it back */
	qed_iwarp_ll2_post_rx(p_hwfn, buf, connection_handle);
}
/* ll2 tx-release callback: drop the buffer entirely (no repost). A NULL
 * cookie is tolerated.
 */
static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
				     void *cookie, dma_addr_t first_frag_addr,
				     bool b_last_fragment, bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_iwarp_ll2_buff *buf = cookie;

	if (!buf)
		return;

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
			  buf->data, buf->data_phys_addr);
	kfree(buf);
}
/* Tear down the iWARP ll2 SYN connection and remove the MAC filter.
 *
 * Terminate is attempted before release; a terminate failure is logged
 * but teardown continues. Returns the terminate result (0 if the handle
 * was already invalid).
 */
static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	int rc = 0;

	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
		rc = qed_ll2_terminate_connection(p_hwfn,
						  iwarp_info->ll2_syn_handle);
		if (rc)
			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");

		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
		/* Mark released so a repeated stop is a no-op */
		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
	}

	qed_llh_remove_mac_filter(p_hwfn,
				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
	return rc;
}
/* Allocate num_rx_bufs DMA-coherent rx buffers of buff_size bytes and
 * post them to the given ll2 connection. Returns 0 on success or a
 * negative errno on the first failure; buffers already posted remain
 * owned by (and are later freed through) qed_ll2.
 */
static int
qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
			    int num_rx_bufs, int buff_size, u8 ll2_handle)
{
	struct qed_iwarp_ll2_buff *buf;
	int rc;
	int i;

	for (i = 0; i < num_rx_bufs; i++) {
		buf = kzalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		buf->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					       buff_size,
					       &buf->data_phys_addr,
					       GFP_KERNEL);
		if (!buf->data) {
			kfree(buf);
			return -ENOMEM;
		}

		buf->buff_size = buff_size;
		rc = qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_handle);
		if (rc)
			/* buffers will be deallocated by qed_ll2 */
			return rc;
	}

	return 0;
}
/* Rx buffer size for an ll2 connection with the given MTU, rounded up to
 * a cache line. NOTE(review): the slack presumably covers Ethernet header,
 * two VLAN tags, 2 alignment bytes and one extra cache line of headroom —
 * confirm against the placement offset used by qed_ll2.
 */
#define QED_IWARP_MAX_BUF_SIZE(mtu)				      \
	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
	      ETH_CACHE_LINE_SIZE)
/* Bring up the iWARP ll2 plumbing: add the MAC filter, then acquire,
 * establish and stock the SYN-reception ll2 connection with rx buffers.
 *
 * On any failure after the MAC filter was added, everything set up so far
 * is unwound (directly, or via qed_iwarp_ll2_stop()).
 */
static int
qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
		    struct qed_rdma_start_in_params *params,
		    struct qed_ptt *p_ptt)
{
	struct qed_iwarp_info *iwarp_info;
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	int rc = 0;

	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;

	iwarp_info->max_mtu = params->max_mtu;

	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);

	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
	if (rc)
		return rc;

	/* Start SYN connection */
	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
	cbs.cookie = p_hwfn;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_IWARP;
	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
	data.input.tx_tc = PKT_LB_TC;
	data.input.tx_dest = QED_LL2_TX_DEST_LB;
	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
	data.cbs = &cbs;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
		/* Nothing to unwind yet except the MAC filter */
		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
		goto err;
	}

	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
					 QED_IWARP_LL2_SYN_RX_SIZE,
					 QED_IWARP_MAX_SYN_PKT_SIZE,
					 iwarp_info->ll2_syn_handle);
	if (rc)
		goto err;

	return rc;
err:
	/* Releases the ll2 connection and the MAC filter */
	qed_iwarp_ll2_stop(p_hwfn, p_ptt);

	return rc;
}
/* Initialize iWARP defaults (TCP options, receive-window scale, MPA
 * negotiation parameters), set up the connection-tracking lists/locks,
 * register for iWARP firmware async events, and start the LL2 SYN path.
 * Returns the qed_iwarp_ll2_start() status.
 */
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
		    struct qed_rdma_start_in_params *params)
{
	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
	u32 rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;

	iwarp_info->tcp_flags = QED_IWARP_TS_EN;

	/* Scale is relative to the minimum window:
	 * value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN)
	 */
	iwarp_info->rcv_wnd_scale =
	    ilog2(rcv_wnd_size) - ilog2(QED_IWARP_RCV_WND_SIZE_MIN);

	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;

	/* Enhanced MPA negotiation with all zero-length RTR types allowed */
	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
	iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND |
			       MPA_RTR_TYPE_ZERO_WRITE |
			       MPA_RTR_TYPE_ZERO_READ;

	spin_lock_init(&iwarp_info->qp_lock);
	INIT_LIST_HEAD(&iwarp_info->ep_list);
	INIT_LIST_HEAD(&iwarp_info->listen_list);

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
				  qed_iwarp_async_event);

	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
}
/* Stop iWARP: release pre-allocated EPs, wait until all connection CIDs are
 * quiesced, unregister the firmware async callback and shut down the LL2
 * SYN path. Returns an error (without tearing down LL2) if the CID wait
 * fails.
 */
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc;

	qed_iwarp_free_prealloc_ep(p_hwfn);

	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
	if (rc != 0)
		return rc;

	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
}
/* Handle the firmware "QP in error" async event (completion of a close
 * connection flow): move the QP to ERROR, unlink the ep from the tracked
 * ep_list and deliver a CLOSE event to the upper layer. status is 0 for a
 * graceful close, -ECONNRESET otherwise.
 */
void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	/* internal=true: state change driven by HW/FW, not the user */
	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);

	params.event = QED_IWARP_EVENT_CLOSE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
			 0 : -ECONNRESET;

	ep->state = QED_IWARP_EP_CLOSED;
	/* Remove from ep_list under iw_lock before notifying upper layer */
	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&ep->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	ep->event_cb(ep->cb_context, &params);
}
/* Translate a firmware "exception detected" async event into an upper-layer
 * CM event and deliver it through ep->event_cb. Unknown codes are logged and
 * dropped.
 *
 * Fix: zero-initialize @params. Previously only the LLP_CLOSED/LLP_RESET
 * cases set params.status, so for every other event (RQ_EMPTY, IRQ_FULL,
 * LLP_TIMEOUT, ...) the callback received an indeterminate status read from
 * uninitialized stack memory. This also matches the memset done in
 * qed_iwarp_tcp_connect_unsuccessful().
 */
void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
				  struct qed_iwarp_ep *ep, int fw_ret_code)
{
	struct qed_iwarp_cm_event_params params;
	bool event_cb = false;

	memset(&params, 0, sizeof(params));

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
		   ep->cid, fw_ret_code);

	switch (fw_ret_code) {
	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
		params.status = 0;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
		params.status = -ECONNRESET;
		params.event = QED_IWARP_EVENT_DISCONNECT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
		params.event = QED_IWARP_EVENT_RQ_EMPTY;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
		params.event = QED_IWARP_EVENT_IRQ_FULL;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
		event_cb = true;
		break;
	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
		event_cb = true;
		break;
	default:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Unhandled exception received...fw_ret_code=%d\n",
			   fw_ret_code);
		break;
	}

	if (event_cb) {
		params.ep_context = ep;
		params.cm_info = &ep->cm_info;
		ep->event_cb(ep->cb_context, &params);
	}
}
/* Handle a failed TCP connect (either side): log the firmware failure
 * reason, map it to an errno-style status, and complete the connect attempt.
 *
 * Passive side: the upper layer never learned of this connection, so the ep
 * is quietly recycled and no event is raised. Active side: deliver
 * ACTIVE_COMPLETE with the error status, then unlink the ep from ep_list.
 */
static void
qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
				   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	struct qed_iwarp_cm_event_params params;

	memset(&params, 0, sizeof(params));
	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
	params.ep_context = ep;
	params.cm_info = &ep->cm_info;
	ep->state = QED_IWARP_EP_CLOSED;

	/* Map the firmware failure reason to an errno for the upper layer */
	switch (fw_return_code) {
	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP connect got invalid packet\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "%s(0x%x) TCP Connection Reset\n",
			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -EBUSY;
		break;
	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNREFUSED;
		break;
	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
		params.status = -ECONNRESET;
		break;
	default:
		DP_ERR(p_hwfn,
		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
		       QED_IWARP_CONNECT_MODE_STRING(ep),
		       ep->tcp_cid, fw_return_code);
		params.status = -ECONNRESET;
		break;
	}

	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		/* Passive ep wasn't reported up; recycle it silently */
		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
		qed_iwarp_return_ep(p_hwfn, ep);
	} else {
		ep->event_cb(ep->cb_context, &params);
		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
		list_del(&ep->list_entry);
		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	}
}
/* Completion of the TCP 3-way handshake. On the passive side the SYN buffer
 * is first recycled back to the LL2 rx chain. A successful handshake then
 * proceeds to the MPA phase (passive: report the received MPA request;
 * active: offload the MPA exchange); a failed one is reported via
 * qed_iwarp_tcp_connect_unsuccessful().
 */
void
qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
			   struct qed_iwarp_ep *ep, u8 fw_return_code)
{
	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
		u8 syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;

		/* Done with the SYN packet, post it back to ll2 rx */
		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, syn_handle);
		ep->syn = NULL;
	}

	/* If connect failed - upper layer doesn't know about it */
	if (fw_return_code != RDMA_RETURN_OK) {
		qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
						   fw_return_code);
		return;
	}

	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
		qed_iwarp_mpa_received(p_hwfn, ep);
	else
		qed_iwarp_mpa_offload(p_hwfn, ep);
}
/* Sanity-check an ep pointer recovered from a firmware async handle:
 * it must be non-NULL and carry the QED_EP_SIG magic. Logs and returns
 * false on a corrupt handle.
 */
static inline bool
qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
	if (ep && ep->sig == QED_EP_SIG)
		return true;

	DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
	return false;
}
/* Dispatcher for PROTOCOLID_IWARP firmware async events (registered via
 * qed_spq_register_async_cb). The event's async_handle carries either an
 * ep pointer (validated with qed_iwarp_check_ep_ok) or, for CID_CLEANED,
 * the cleaned CID in its low dword. Returns 0, or -EINVAL for a corrupt
 * handle / unknown event code.
 */
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
				 u8 fw_event_code, u16 echo,
				 union event_ring_data *data,
				 u8 fw_return_code)
{
	struct regpair *fw_handle = &data->rdma_data.async_handle;
	struct qed_iwarp_ep *ep = NULL;
	u16 cid;

	/* The 64-bit handle is the ep pointer we gave firmware earlier */
	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
						       fw_handle->lo);
	switch (fw_event_code) {
	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
		/* Async completion after TCP 3-way handshake */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
			   ep->tcp_cid, fw_return_code);
		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
		/* Async completion for Close Connection ramrod */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
		/* Async event for active side only */
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
		break;
	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
			return -EINVAL;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_RDMA,
			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
			   ep->cid, fw_return_code);
		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
		break;
	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
		/* Handle carries the cleaned CID, not an ep pointer */
		cid = (u16)le32_to_cpu(fw_handle->lo);
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
		qed_iwarp_cid_cleaned(p_hwfn, cid);

		break;
	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");

		/* Unaffiliated with a specific ep - report device-wide */
		p_hwfn->p_rdma_info->events.affiliated_event(
			p_hwfn->p_rdma_info->events.context,
			QED_IWARP_EVENT_CQ_OVERFLOW,
			(void *)fw_handle);
		break;
	default:
		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
		       fw_event_code);
		return -EINVAL;
	}
	return 0;
}
/* Create an iWARP listener on the given ip/port/vlan and add it to the
 * per-function listen_list. Incoming SYNs are matched against this list;
 * accepted connection requests are reported through iparams->event_cb.
 * The listener pointer is returned as the opaque handle in @oparams.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int
qed_iwarp_create_listen(void *rdma_cxt,
			struct qed_iwarp_listen_in *iparams,
			struct qed_iwarp_listen_out *oparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_listener *l;

	l = kzalloc(sizeof(*l), GFP_KERNEL);
	if (!l)
		return -ENOMEM;

	l->ip_version = iparams->ip_version;
	memcpy(l->ip_addr, iparams->ip_addr, sizeof(l->ip_addr));
	l->port = iparams->port;
	l->vlan = iparams->vlan;
	l->event_cb = iparams->event_cb;
	l->cb_context = iparams->cb_context;
	l->max_backlog = iparams->max_backlog;
	oparams->handle = l;

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_add_tail(&l->list_entry,
		      &p_hwfn->p_rdma_info->iwarp.listen_list);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_RDMA,
		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
		   l->event_cb,
		   l,
		   l->ip_addr[0],
		   l->ip_addr[1],
		   l->ip_addr[2],
		   l->ip_addr[3], l->port, l->vlan);

	return 0;
}
/* Destroy a listener created by qed_iwarp_create_listen(): unlink it from
 * listen_list under iw_lock and free it. @handle is the opaque listener
 * pointer returned at creation. Always returns 0.
 */
int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_iwarp_listener *l = handle;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);

	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
	list_del(&l->list_entry);
	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

	kfree(l);

	return 0;
}
/* Post an MPA_OFFLOAD_SEND_RTR ramrod for the QP bound to the given ep,
 * instructing firmware to send the RTR (ready-to-receive) message of the
 * enhanced MPA negotiation. Returns 0 on success or a qed_sp/spq error.
 */
int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
{
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_iwarp_ep *ep;
	struct qed_rdma_qp *qp;
	int rc;

	ep = iparams->ep_context;
	if (!ep) {
		DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n");
		return -EINVAL;
	}

	qp = ep->qp;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
		   qp->icid, ep->tcp_cid);

	/* Ramrod is addressed by the QP's icid; completion via callback */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
				 PROTOCOLID_IWARP, &init_data);
	if (rc)
		return rc;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);

	return rc;
}
void
qed_iwarp_query_qp(struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params)
{
out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
}
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _QED_IWARP_H
#define _QED_IWARP_H
/* iWARP QP states; translated to/from the RoCE state machine via
 * qed_roce2iwarp_state() / qed_iwarp2roce_state().
 */
enum qed_iwarp_qp_state {
	QED_IWARP_QP_STATE_IDLE,
	QED_IWARP_QP_STATE_RTS,
	QED_IWARP_QP_STATE_TERMINATE,
	QED_IWARP_QP_STATE_CLOSING,
	QED_IWARP_QP_STATE_ERROR,
};
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
#define QED_IWARP_PREALLOC_CNT (256)
#define QED_IWARP_LL2_SYN_TX_SIZE (128)
#define QED_IWARP_LL2_SYN_RX_SIZE (256)
#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
#define QED_IWARP_HANDLE_INVAL (0xff)
/* A DMA-coherent buffer posted to an LL2 rx/tx chain (used as the cookie
 * in the LL2 completion/release callbacks).
 */
struct qed_iwarp_ll2_buff {
	void *data;			/* CPU virtual address */
	dma_addr_t data_phys_addr;	/* DMA address of @data */
	u32 buff_size;			/* allocation size in bytes */
};
/* Per-hwfn iWARP state, embedded in struct qed_rdma_info */
struct qed_iwarp_info {
	struct list_head listen_list;	/* qed_iwarp_listener */
	struct list_head ep_list;	/* qed_iwarp_ep */
	struct list_head ep_free_list;	/* pre-allocated ep's */
	spinlock_t iw_lock;	/* for iwarp resources */
	spinlock_t qp_lock;	/* for teardown races */
	u32 rcv_wnd_scale;	/* TCP window scale relative to WND_SIZE_MIN */
	u16 max_mtu;		/* from qed_rdma_start_in_params */
	u8 mac_addr[ETH_ALEN];	/* MAC used for the llh filter */
	u8 crc_needed;		/* MPA CRC required on this connection */
	u8 tcp_flags;		/* e.g. QED_IWARP_TS_EN */
	u8 ll2_syn_handle;	/* LL2 SYN conn; QED_IWARP_HANDLE_INVAL if down */
	u8 peer2peer;		/* MPA peer-to-peer negotiation enabled */
	enum mpa_negotiation_mode mpa_rev;	/* basic vs enhanced MPA */
	enum mpa_rtr_type rtr_type;	/* allowed zero-length RTR types */
};
/* Lifecycle of an endpoint (TCP connection): from initial allocation,
 * through MPA request reception / offload, to established or closed.
 */
enum qed_iwarp_ep_state {
	QED_IWARP_EP_INIT,
	QED_IWARP_EP_MPA_REQ_RCVD,
	QED_IWARP_EP_MPA_OFFLOADED,
	QED_IWARP_EP_ESTABLISHED,
	QED_IWARP_EP_CLOSED
};
/* Firmware async-event payload written into the ep buffer: an MPA response
 * (active side) or an MPA request (passive side).
 */
union async_output {
	struct iwarp_eqe_data_mpa_async_completion mpa_response;
	struct iwarp_eqe_data_tcp_async_completion mpa_request;
};
#define QED_MAX_PRIV_DATA_LEN (512)
/* DMA-mapped per-ep scratch memory shared with firmware: MPA private data
 * in both directions plus the async-event output area.
 */
struct qed_iwarp_ep_memory {
	u8 in_pdata[QED_MAX_PRIV_DATA_LEN];	/* private data received */
	u8 out_pdata[QED_MAX_PRIV_DATA_LEN];	/* private data to send */
	union async_output async_output;
};
/* Endpoint structure represents a TCP connection. This connection can be
 * associated with a QP or not (in which case QP==NULL)
 */
struct qed_iwarp_ep {
	struct list_head list_entry;	/* on iwarp ep_list / ep_free_list */
	struct qed_rdma_qp *qp;		/* bound QP, or NULL */
	struct qed_iwarp_ep_memory *ep_buffer_virt;	/* fw-shared scratch */
	dma_addr_t ep_buffer_phys;	/* DMA address of ep_buffer_virt */
	enum qed_iwarp_ep_state state;
	int sig;			/* QED_EP_SIG magic; validated on async events */
	struct qed_iwarp_cm_info cm_info;	/* addresses/ports/pdata */
	enum tcp_connect_mode connect_mode;	/* active or passive side */
	enum mpa_rtr_type rtr_type;
	enum mpa_negotiation_mode mpa_rev;
	u32 tcp_cid;	/* TCP-level CID; QED_IWARP_INVALID_TCP_CID when recycled */
	u32 cid;	/* iWARP/QP-level CID */
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
	bool mpa_reply_processed;	/* active side: MPA reply already handled */

	/* For Passive side - syn packet related data */
	u16 syn_ip_payload_length;
	struct qed_iwarp_ll2_buff *syn;	/* SYN rx buffer, returned to ll2 later */
	dma_addr_t syn_phy_addr;

	/* The event_cb function is called for asynchrounous events associated
	 * with the ep. It is initialized at different entry points depending
	 * on whether the ep is the tcp connection active side or passive side
	 * The cb_context is passed to the event_cb function.
	 */
	iwarp_event_handler event_cb;
	void *cb_context;
};
/* A listening endpoint: incoming SYNs are matched against ip/port/vlan
 * of the entries on qed_iwarp_info::listen_list.
 */
struct qed_iwarp_listener {
	struct list_head list_entry;	/* on iwarp listen_list */

	/* The event_cb function is called for connection requests.
	 * The cb_context is passed to the event_cb function.
	 */
	iwarp_event_handler event_cb;
	void *cb_context;
	u32 max_backlog;	/* max pending connection requests */
	u32 ip_addr[4];		/* IPv4 uses ip_addr[0]; IPv6 uses all four */
	u16 port;
	u16 vlan;
	u8 ip_version;
};
int qed_iwarp_alloc(struct qed_hwfn *p_hwfn);
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params);
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn);
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn);
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
struct qed_rdma_create_qp_out_params *out_params);
int qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp,
enum qed_iwarp_qp_state new_state, bool internal);
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params);
int
qed_iwarp_connect(void *rdma_cxt,
struct qed_iwarp_connect_in *iparams,
struct qed_iwarp_connect_out *oparams);
int
qed_iwarp_create_listen(void *rdma_cxt,
struct qed_iwarp_listen_in *iparams,
struct qed_iwarp_listen_out *oparams);
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams);
int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams);
int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle);
int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams);
#endif
...@@ -79,8 +79,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn) ...@@ -79,8 +79,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
unsigned long **pp_qids; unsigned long **pp_qids;
u32 i; u32 i;
if (p_hwfn->hw_info.personality != QED_PCI_ETH && if (!QED_IS_L2_PERSONALITY(p_hwfn))
p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
return 0; return 0;
p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL); p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
...@@ -1228,19 +1227,6 @@ static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) ...@@ -1228,19 +1227,6 @@ static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
return action; return action;
} }
static void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid,
__le16 *fw_lsb,
u8 *mac)
{
((u8 *)fw_msb)[0] = mac[1];
((u8 *)fw_msb)[1] = mac[0];
((u8 *)fw_mid)[0] = mac[3];
((u8 *)fw_mid)[1] = mac[2];
((u8 *)fw_lsb)[0] = mac[5];
((u8 *)fw_lsb)[1] = mac[4];
}
static int static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn, qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
u16 opaque_fid, u16 opaque_fid,
......
...@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry); list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq); b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer; struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
...@@ -532,7 +532,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) ...@@ -532,7 +532,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
struct qed_ooo_buffer *p_buffer; struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
...@@ -893,11 +893,11 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -893,11 +893,11 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en; p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;
: 1;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) { p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
(conn_type != QED_LL2_TYPE_IWARP)) {
p_ramrod->mf_si_bcast_accept_all = 1; p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1; p_ramrod->mf_si_mcast_accept_all = 1;
} else { } else {
...@@ -924,7 +924,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -924,7 +924,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0; return 0;
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
p_ll2_conn->tx_stats_en = 0; p_ll2_conn->tx_stats_en = 0;
else else
p_ll2_conn->tx_stats_en = 1; p_ll2_conn->tx_stats_en = 1;
...@@ -955,10 +955,10 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -955,10 +955,10 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->pbl_size = cpu_to_le16(pbl_size); p_ramrod->pbl_size = cpu_to_le16(pbl_size);
switch (p_ll2_conn->input.tx_tc) { switch (p_ll2_conn->input.tx_tc) {
case LB_TC: case PURE_LB_TC:
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
break; break;
case OOO_LB_TC: case PKT_LB_TC:
pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
break; break;
default: default:
...@@ -973,12 +973,20 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -973,12 +973,20 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->conn_type = PROTOCOLID_FCOE; p_ramrod->conn_type = PROTOCOLID_FCOE;
break; break;
case QED_LL2_TYPE_ISCSI: case QED_LL2_TYPE_ISCSI:
case QED_LL2_TYPE_ISCSI_OOO:
p_ramrod->conn_type = PROTOCOLID_ISCSI; p_ramrod->conn_type = PROTOCOLID_ISCSI;
break; break;
case QED_LL2_TYPE_ROCE: case QED_LL2_TYPE_ROCE:
p_ramrod->conn_type = PROTOCOLID_ROCE; p_ramrod->conn_type = PROTOCOLID_ROCE;
break; break;
case QED_LL2_TYPE_IWARP:
p_ramrod->conn_type = PROTOCOLID_IWARP;
break;
case QED_LL2_TYPE_OOO:
if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
p_ramrod->conn_type = PROTOCOLID_ISCSI;
else
p_ramrod->conn_type = PROTOCOLID_IWARP;
break;
default: default:
p_ramrod->conn_type = PROTOCOLID_ETH; p_ramrod->conn_type = PROTOCOLID_ETH;
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
...@@ -1142,7 +1150,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, ...@@ -1142,7 +1150,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
u16 buf_idx; u16 buf_idx;
int rc = 0; int rc = 0;
if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
return rc; return rc;
/* Correct number of requested OOO buffers if needed */ /* Correct number of requested OOO buffers if needed */
...@@ -1280,7 +1288,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) ...@@ -1280,7 +1288,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
goto q_allocate_fail; goto q_allocate_fail;
/* Register callbacks for the Rx/Tx queues */ /* Register callbacks for the Rx/Tx queues */
if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) { if (data->input.conn_type == QED_LL2_TYPE_OOO) {
comp_rx_cb = qed_ll2_lb_rxq_completion; comp_rx_cb = qed_ll2_lb_rxq_completion;
comp_tx_cb = qed_ll2_lb_txq_completion; comp_tx_cb = qed_ll2_lb_txq_completion;
} else { } else {
...@@ -1339,7 +1347,7 @@ static void ...@@ -1339,7 +1347,7 @@ static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn) struct qed_ll2_info *p_ll2_conn)
{ {
if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
return; return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
...@@ -1421,7 +1429,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) ...@@ -1421,7 +1429,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
if (rc) if (rc)
goto out; goto out;
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1); qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
...@@ -1794,7 +1802,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) ...@@ -1794,7 +1802,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle); qed_ll2_rxq_flush(p_hwfn, connection_handle);
} }
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
...@@ -1816,7 +1824,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, ...@@ -1816,7 +1824,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
{ {
struct qed_ooo_buffer *p_buffer; struct qed_ooo_buffer *p_buffer;
if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
return; return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
...@@ -2063,7 +2071,7 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev, ...@@ -2063,7 +2071,7 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev,
ll2_cbs.cookie = QED_LEADING_HWFN(cdev); ll2_cbs.cookie = QED_LEADING_HWFN(cdev);
if (lb) { if (lb) {
data->input.tx_tc = OOO_LB_TC; data->input.tx_tc = PKT_LB_TC;
data->input.tx_dest = QED_LL2_TX_DEST_LB; data->input.tx_dest = QED_LL2_TX_DEST_LB;
} else { } else {
data->input.tx_tc = 0; data->input.tx_tc = 0;
...@@ -2080,7 +2088,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev, ...@@ -2080,7 +2088,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
int rc; int rc;
qed_ll2_set_conn_data(cdev, &data, params, qed_ll2_set_conn_data(cdev, &data, params,
QED_LL2_TYPE_ISCSI_OOO, handle, true); QED_LL2_TYPE_OOO, handle, true);
rc = qed_ll2_acquire_connection(hwfn, &data); rc = qed_ll2_acquire_connection(hwfn, &data);
if (rc) { if (rc) {
......
...@@ -237,6 +237,8 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) ...@@ -237,6 +237,8 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
int qed_fill_dev_info(struct qed_dev *cdev, int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info) struct qed_dev_info *dev_info)
{ {
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_hw_info *hw_info = &p_hwfn->hw_info;
struct qed_tunnel_info *tun = &cdev->tunnel; struct qed_tunnel_info *tun = &cdev->tunnel;
struct qed_ptt *ptt; struct qed_ptt *ptt;
...@@ -260,11 +262,10 @@ int qed_fill_dev_info(struct qed_dev *cdev, ...@@ -260,11 +262,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq; dev_info->pci_irq = cdev->pci_params.irq;
dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
dev_info->dev_type = cdev->type; dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
if (IS_PF(cdev)) { if (IS_PF(cdev)) {
dev_info->fw_major = FW_MAJOR_VERSION; dev_info->fw_major = FW_MAJOR_VERSION;
...@@ -274,8 +275,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, ...@@ -274,8 +275,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->mf_mode = cdev->mf_mode; dev_info->mf_mode = cdev->mf_mode;
dev_info->tx_switching = true; dev_info->tx_switching = true;
if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support == if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
QED_WOL_SUPPORT_PME)
dev_info->wol_support = true; dev_info->wol_support = true;
dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
...@@ -304,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, ...@@ -304,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
&dev_info->mfw_rev, NULL); &dev_info->mfw_rev, NULL);
} }
dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu; dev_info->mtu = hw_info->mtu;
return 0; return 0;
} }
...@@ -790,7 +790,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, ...@@ -790,7 +790,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->num_hwfns; cdev->num_hwfns;
if (!IS_ENABLED(CONFIG_QED_RDMA) || if (!IS_ENABLED(CONFIG_QED_RDMA) ||
QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE) !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
return 0; return 0;
for_each_hwfn(cdev, i) for_each_hwfn(cdev, i)
...@@ -931,8 +931,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, ...@@ -931,8 +931,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
/* In case we might support RDMA, don't allow qede to be greedy /* In case we might support RDMA, don't allow qede to be greedy
* with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
*/ */
if (QED_LEADING_HWFN(cdev)->hw_info.personality == if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
QED_PCI_ETH_ROCE) {
u16 *num_cons; u16 *num_cons;
num_cons = &params->eth_pf_params.num_cons; num_cons = &params->eth_pf_params.num_cons;
......
...@@ -161,7 +161,10 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, ...@@ -161,7 +161,10 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
NULL); NULL);
p_rdma_info->num_qps = num_cons / 2; if (QED_IS_IWARP_PERSONALITY(p_hwfn))
p_rdma_info->num_qps = num_cons;
else
p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */
num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
...@@ -252,6 +255,13 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, ...@@ -252,6 +255,13 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
"Failed to allocate real cid bitmap, rc = %d\n", rc); "Failed to allocate real cid bitmap, rc = %d\n", rc);
goto free_cid_map; goto free_cid_map;
} }
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
rc = qed_iwarp_alloc(p_hwfn);
if (rc)
goto free_cid_map;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0; return 0;
...@@ -329,6 +339,9 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) ...@@ -329,6 +339,9 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{ {
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
qed_iwarp_resc_free(p_hwfn);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
...@@ -470,6 +483,9 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, ...@@ -470,6 +483,9 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
qed_iwarp_init_devinfo(p_hwfn);
} }
static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
...@@ -490,29 +506,17 @@ static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) ...@@ -490,29 +506,17 @@ static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{ {
u32 ll2_ethertype_en; int rc = 0;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
p_hwfn->b_rdma_enabled_in_prs = false; p_hwfn->b_rdma_enabled_in_prs = false;
qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); if (QED_IS_IWARP_PERSONALITY(p_hwfn))
qed_iwarp_init_hw(p_hwfn, p_ptt);
p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; else
rc = qed_roce_init_hw(p_hwfn, p_ptt);
/* We delay writing to this reg until first cid is allocated. See
* qed_cxt_dynamic_ilt_alloc function for more details
*/
ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
(ll2_ethertype_en | 0x01));
if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
return -EINVAL;
}
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); return rc;
return 0;
} }
static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
...@@ -544,6 +548,9 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, ...@@ -544,6 +548,9 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
if (rc) if (rc)
return rc; return rc;
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
else
p_ramrod = &p_ent->ramrod.roce_init_func.rdma; p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
p_params_header = &p_ramrod->params_header; p_params_header = &p_ramrod->params_header;
...@@ -641,7 +648,15 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, ...@@ -641,7 +648,15 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
if (rc) if (rc)
return rc; return rc;
qed_roce_setup(p_hwfn); if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
if (rc)
return rc;
} else {
rc = qed_roce_setup(p_hwfn);
if (rc)
return rc;
}
return qed_rdma_start_fw(p_hwfn, params, p_ptt); return qed_rdma_start_fw(p_hwfn, params, p_ptt);
} }
...@@ -675,7 +690,16 @@ int qed_rdma_stop(void *rdma_cxt) ...@@ -675,7 +690,16 @@ int qed_rdma_stop(void *rdma_cxt)
qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
(ll2_ethertype_en & 0xFFFE)); (ll2_ethertype_en & 0xFFFE));
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
rc = qed_iwarp_stop(p_hwfn, p_ptt);
if (rc) {
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
} else {
qed_roce_stop(p_hwfn); qed_roce_stop(p_hwfn);
}
qed_ptt_release(p_hwfn, p_ptt); qed_ptt_release(p_hwfn, p_ptt);
/* Get SPQ entry */ /* Get SPQ entry */
...@@ -810,7 +834,9 @@ static int qed_fill_rdma_dev_info(struct qed_dev *cdev, ...@@ -810,7 +834,9 @@ static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
memset(info, 0, sizeof(*info)); memset(info, 0, sizeof(*info));
info->rdma_type = QED_RDMA_TYPE_ROCE; info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;
info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
qed_fill_dev_info(cdev, &info->common); qed_fill_dev_info(cdev, &info->common);
...@@ -1112,7 +1138,7 @@ static int qed_rdma_query_qp(void *rdma_cxt, ...@@ -1112,7 +1138,7 @@ static int qed_rdma_query_qp(void *rdma_cxt,
struct qed_rdma_query_qp_out_params *out_params) struct qed_rdma_query_qp_out_params *out_params)
{ {
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc; int rc = 0;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
...@@ -1138,6 +1164,9 @@ static int qed_rdma_query_qp(void *rdma_cxt, ...@@ -1138,6 +1164,9 @@ static int qed_rdma_query_qp(void *rdma_cxt,
out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
out_params->sqd_async = qp->sqd_async; out_params->sqd_async = qp->sqd_async;
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
qed_iwarp_query_qp(qp, out_params);
else
rc = qed_roce_query_qp(p_hwfn, qp, out_params); rc = qed_roce_query_qp(p_hwfn, qp, out_params);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
...@@ -1151,6 +1180,9 @@ static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) ...@@ -1151,6 +1180,9 @@ static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (QED_IS_IWARP_PERSONALITY(p_hwfn))
rc = qed_iwarp_destroy_qp(p_hwfn, qp);
else
rc = qed_roce_destroy_qp(p_hwfn, qp); rc = qed_roce_destroy_qp(p_hwfn, qp);
/* free qp params struct */ /* free qp params struct */
...@@ -1190,19 +1222,26 @@ qed_rdma_create_qp(void *rdma_cxt, ...@@ -1190,19 +1222,26 @@ qed_rdma_create_qp(void *rdma_cxt,
return NULL; return NULL;
} }
qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
if (!qp) if (in_params->sq_num_pages * sizeof(struct regpair) >
IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
DP_NOTICE(p_hwfn->cdev,
"Sq num pages: %d exceeds maximum\n",
in_params->sq_num_pages);
return NULL; return NULL;
}
rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); if (in_params->rq_num_pages * sizeof(struct regpair) >
qp->qpid = ((0xFF << 16) | qp->icid); IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
DP_NOTICE(p_hwfn->cdev,
DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid); "Rq num pages: %d exceeds maximum\n",
in_params->rq_num_pages);
if (rc) {
kfree(qp);
return NULL; return NULL;
} }
}
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return NULL;
qp->cur_state = QED_ROCE_QP_STATE_RESET; qp->cur_state = QED_ROCE_QP_STATE_RESET;
qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
...@@ -1226,6 +1265,19 @@ qed_rdma_create_qp(void *rdma_cxt, ...@@ -1226,6 +1265,19 @@ qed_rdma_create_qp(void *rdma_cxt,
qp->e2e_flow_control_en = qp->use_srq ? false : true; qp->e2e_flow_control_en = qp->use_srq ? false : true;
qp->stats_queue = in_params->stats_queue; qp->stats_queue = in_params->stats_queue;
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
qp->qpid = qp->icid;
} else {
rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
qp->qpid = ((0xFF << 16) | qp->icid);
}
if (rc) {
kfree(qp);
return NULL;
}
out_params->icid = qp->icid; out_params->icid = qp->icid;
out_params->qp_id = qp->qpid; out_params->qp_id = qp->qpid;
...@@ -1324,7 +1376,14 @@ static int qed_rdma_modify_qp(void *rdma_cxt, ...@@ -1324,7 +1376,14 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
qp->cur_state); qp->cur_state);
} }
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
enum qed_iwarp_qp_state new_state =
qed_roce2iwarp_state(qp->cur_state);
rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
} else {
rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
}
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
return rc; return rc;
...@@ -1713,6 +1772,12 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { ...@@ -1713,6 +1772,12 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
.ll2_get_stats = &qed_ll2_get_stats, .ll2_get_stats = &qed_ll2_get_stats,
.iwarp_connect = &qed_iwarp_connect,
.iwarp_create_listen = &qed_iwarp_create_listen,
.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
.iwarp_accept = &qed_iwarp_accept,
.iwarp_reject = &qed_iwarp_reject,
.iwarp_send_rtr = &qed_iwarp_send_rtr,
}; };
const struct qed_rdma_ops *qed_get_rdma_ops(void) const struct qed_rdma_ops *qed_get_rdma_ops(void)
......
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include "qed.h" #include "qed.h"
#include "qed_dev_api.h" #include "qed_dev_api.h"
#include "qed_hsi.h" #include "qed_hsi.h"
#include "qed_iwarp.h"
#include "qed_roce.h" #include "qed_roce.h"
#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS) #define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
...@@ -84,6 +85,7 @@ struct qed_rdma_info { ...@@ -84,6 +85,7 @@ struct qed_rdma_info {
struct qed_bmap qp_map; struct qed_bmap qp_map;
struct qed_bmap srq_map; struct qed_bmap srq_map;
struct qed_bmap cid_map; struct qed_bmap cid_map;
struct qed_bmap tcp_cid_map;
struct qed_bmap real_cid_map; struct qed_bmap real_cid_map;
struct qed_bmap dpi_map; struct qed_bmap dpi_map;
struct qed_bmap toggle_bits; struct qed_bmap toggle_bits;
...@@ -97,6 +99,7 @@ struct qed_rdma_info { ...@@ -97,6 +99,7 @@ struct qed_rdma_info {
u16 queue_zone_base; u16 queue_zone_base;
u16 max_queue_zones; u16 max_queue_zones;
enum protocol_type proto; enum protocol_type proto;
struct qed_iwarp_info iwarp;
}; };
struct qed_rdma_qp { struct qed_rdma_qp {
...@@ -105,6 +108,7 @@ struct qed_rdma_qp { ...@@ -105,6 +108,7 @@ struct qed_rdma_qp {
u32 qpid; u32 qpid;
u16 icid; u16 icid;
enum qed_roce_qp_state cur_state; enum qed_roce_qp_state cur_state;
enum qed_iwarp_qp_state iwarp_state;
bool use_srq; bool use_srq;
bool signal_all; bool signal_all;
bool fmr_and_reserved_lkey; bool fmr_and_reserved_lkey;
...@@ -164,6 +168,7 @@ struct qed_rdma_qp { ...@@ -164,6 +168,7 @@ struct qed_rdma_qp {
void *shared_queue; void *shared_queue;
dma_addr_t shared_queue_phys_addr; dma_addr_t shared_queue_phys_addr;
struct qed_iwarp_ep *ep;
}; };
#if IS_ENABLED(CONFIG_QED_RDMA) #if IS_ENABLED(CONFIG_QED_RDMA)
......
...@@ -1149,3 +1149,23 @@ int qed_roce_setup(struct qed_hwfn *p_hwfn) ...@@ -1149,3 +1149,23 @@ int qed_roce_setup(struct qed_hwfn *p_hwfn)
qed_roce_async_event); qed_roce_async_event);
} }
/* Program the parser registers required for RoCE operation.
 *
 * Factored out of qed_rdma_init_hw() so that the iWARP personality can
 * take a different HW-init path (qed_iwarp_init_hw).
 *
 * Return: 0 on success, -EINVAL if the first RoCE CID is odd (the
 * driver's requester/responder CID pairing assumes an even base CID).
 */
int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	/* No RoCE-destined QPs yet; raised later when CIDs are allocated
	 * (presumably in qed_cxt_dynamic_ilt_alloc — confirm against that
	 * function).
	 */
	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	/* Remember which parser-search register to toggle for this
	 * personality; written when RDMA is actually enabled in PRS.
	 */
	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	/* Read-modify-write: enable bit 0 of the light-L2 ethertype mask
	 * without disturbing the other bits.
	 */
	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	/* CIDs are consumed in even/odd pairs per QP; an odd base breaks
	 * that assumption, so refuse to continue.
	 */
	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}
...@@ -104,12 +104,17 @@ union ramrod_data { ...@@ -104,12 +104,17 @@ union ramrod_data {
struct roce_query_qp_req_ramrod_data roce_query_qp_req; struct roce_query_qp_req_ramrod_data roce_query_qp_req;
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct roce_init_func_ramrod_data roce_init_func;
struct rdma_create_cq_ramrod_data rdma_create_cq; struct rdma_create_cq_ramrod_data rdma_create_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq; struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
struct rdma_srq_modify_ramrod_data rdma_modify_srq; struct rdma_srq_modify_ramrod_data rdma_modify_srq;
struct roce_init_func_ramrod_data roce_init_func; struct iwarp_create_qp_ramrod_data iwarp_create_qp;
struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
struct iwarp_init_func_ramrod_data iwarp_init_func;
struct fcoe_init_ramrod_params fcoe_init; struct fcoe_init_ramrod_params fcoe_init;
struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld; struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate; struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
......
...@@ -38,6 +38,8 @@ ...@@ -38,6 +38,8 @@
#include <linux/slab.h> #include <linux/slab.h>
/* dma_addr_t manip */ /* dma_addr_t manip */
/* Split a pointer into 32-bit halves for HW descriptor fields.
 * PTR_HI shifts in two 16-bit steps so the expression remains defined
 * even when uintptr_t is only 32 bits wide (a single ">> 32" on a
 * 32-bit type would be undefined behavior).
 */
#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
#define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
#define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
#define DMA_REGPAIR_LE(x, val) do { \ #define DMA_REGPAIR_LE(x, val) do { \
...@@ -778,7 +780,7 @@ enum protocol_type { ...@@ -778,7 +780,7 @@ enum protocol_type {
PROTOCOLID_ROCE, PROTOCOLID_ROCE,
PROTOCOLID_CORE, PROTOCOLID_CORE,
PROTOCOLID_ETH, PROTOCOLID_ETH,
PROTOCOLID_RESERVED4, PROTOCOLID_IWARP,
PROTOCOLID_RESERVED5, PROTOCOLID_RESERVED5,
PROTOCOLID_PREROCE, PROTOCOLID_PREROCE,
PROTOCOLID_COMMON, PROTOCOLID_COMMON,
......
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__
#include <linux/qed/rdma_common.h>
/************************/
/* IWARP FW CONSTANTS */
/************************/

/* Connection-establishment role of a QP's TCP connection. */
#define IWARP_ACTIVE_MODE 0
#define IWARP_PASSIVE_MODE 1

/* Shared queue page layout (sizes/offsets in bytes): the RQ PBL region
 * starts at 0x4000 with up to 0x1000 bytes, followed by the SQ PBL
 * region at 0x5000 with up to 0x3000 bytes, within one 0x8000 page.
 */
#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)

/* Firmware limits for send-queue WQEs (bytes). */
#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)

/* Maximum number of iWARP QPs supported by the firmware. */
#define IWARP_MAX_QPS (64 * 1024)
#endif /* __IWARP_COMMON__ */
...@@ -47,9 +47,10 @@ enum qed_ll2_conn_type { ...@@ -47,9 +47,10 @@ enum qed_ll2_conn_type {
QED_LL2_TYPE_FCOE, QED_LL2_TYPE_FCOE,
QED_LL2_TYPE_ISCSI, QED_LL2_TYPE_ISCSI,
QED_LL2_TYPE_TEST, QED_LL2_TYPE_TEST,
QED_LL2_TYPE_ISCSI_OOO, QED_LL2_TYPE_OOO,
QED_LL2_TYPE_RESERVED2, QED_LL2_TYPE_RESERVED2,
QED_LL2_TYPE_ROCE, QED_LL2_TYPE_ROCE,
QED_LL2_TYPE_IWARP,
QED_LL2_TYPE_RESERVED3, QED_LL2_TYPE_RESERVED3,
MAX_QED_LL2_RX_CONN_TYPE MAX_QED_LL2_RX_CONN_TYPE
}; };
......
...@@ -470,6 +470,101 @@ struct qed_rdma_counters_out_params { ...@@ -470,6 +470,101 @@ struct qed_rdma_counters_out_params {
#define QED_ROCE_TX_HEAD_FAILURE (1) #define QED_ROCE_TX_HEAD_FAILURE (1)
#define QED_ROCE_TX_FRAG_FAILURE (2) #define QED_ROCE_TX_FRAG_FAILURE (2)
/* Events delivered to the upper layer through the registered
 * iwarp_event_handler callback (see qed_iwarp_cm_event_params).
 * Do not renumber: values are part of the qed <-> qedr interface.
 */
enum qed_iwarp_event_type {
	QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
	QED_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	QED_IWARP_EVENT_DISCONNECT,
	QED_IWARP_EVENT_CLOSE,
	/* Slowpath/error conditions reported asynchronously */
	QED_IWARP_EVENT_IRQ_FULL,
	QED_IWARP_EVENT_RQ_EMPTY,
	QED_IWARP_EVENT_LLP_TIMEOUT,
	QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	QED_IWARP_EVENT_CQ_OVERFLOW,
	QED_IWARP_EVENT_QP_CATASTROPHIC,
	QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
	QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	QED_IWARP_EVENT_TERMINATE_RECEIVED
};
/* IP version of the TCP connection backing an iWARP QP/listener. */
enum qed_tcp_ip_version {
	QED_TCP_IPV4,
	QED_TCP_IPV6,
};
/* Connection-manager 5-tuple plus MPA parameters for one iWARP
 * connection. IP addresses use u32[4] so IPv4 and IPv6 share a layout;
 * presumably only the first element is significant for IPv4 — confirm
 * against the consumer in qed_iwarp.c / qedr.
 */
struct qed_iwarp_cm_info {
	enum qed_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	u8 ord; /* NOTE(review): assumed MPA outbound read depth — confirm */
	u8 ird; /* NOTE(review): assumed MPA inbound read depth — confirm */
	u16 private_data_len;
	const void *private_data; /* MPA private data; not owned by qed */
};
/* Payload handed to the upper layer's iwarp_event_handler for every
 * connection-manager event.
 */
struct qed_iwarp_cm_event_params {
	enum qed_iwarp_event_type event;
	const struct qed_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status; /* 0 on success; error code otherwise */
};
/* Upper-layer callback for iWARP CM events; 'context' is the
 * cb_context supplied at connect/listen/accept time.
 */
typedef int (*iwarp_event_handler) (void *context,
				    struct qed_iwarp_cm_event_params *event);
/* Input parameters for an active-side (client) iWARP connect. */
struct qed_iwarp_connect_in {
	iwarp_event_handler event_cb; /* invoked on CM events for this EP */
	void *cb_context; /* passed back to event_cb */
	struct qed_rdma_qp *qp;
	struct qed_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[ETH_ALEN];
	u8 local_mac_addr[ETH_ALEN];
};
/* Output of iwarp_connect: opaque endpoint handle for later CM calls. */
struct qed_iwarp_connect_out {
	void *ep_context;
};
/* Input parameters for creating a passive-side (server) listener. */
struct qed_iwarp_listen_in {
	iwarp_event_handler event_cb; /* invoked on incoming MPA requests */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* max pending (unaccepted) connections */
	enum qed_tcp_ip_version ip_version;
	u32 ip_addr[4]; /* local address to bind; u32[4] as in cm_info */
	u16 port;
	u16 vlan;
};
/* Output of iwarp_create_listen: handle for iwarp_destroy_listen. */
struct qed_iwarp_listen_out {
	void *handle;
};
/* Input parameters for accepting a pending passive-side connection. */
struct qed_iwarp_accept_in {
	void *ep_context; /* from the MPA_REQUEST event */
	void *cb_context; /* new context for subsequent events */
	struct qed_rdma_qp *qp;
	const void *private_data; /* MPA response private data */
	u16 private_data_len;
	u8 ord; /* NOTE(review): assumed MPA outbound read depth — confirm */
	u8 ird; /* NOTE(review): assumed MPA inbound read depth — confirm */
};
/* Input parameters for rejecting a pending passive-side connection. */
struct qed_iwarp_reject_in {
	void *ep_context; /* from the MPA_REQUEST event */
	void *cb_context;
	const void *private_data; /* MPA reject private data */
	u16 private_data_len;
};
/* Input for iwarp_send_rtr: endpoint to send the MPA RTR frame on. */
struct qed_iwarp_send_rtr_in {
	void *ep_context;
};
struct qed_roce_ll2_header { struct qed_roce_ll2_header {
void *vaddr; void *vaddr;
dma_addr_t baddr; dma_addr_t baddr;
...@@ -491,6 +586,7 @@ struct qed_roce_ll2_packet { ...@@ -491,6 +586,7 @@ struct qed_roce_ll2_packet {
enum qed_rdma_type { enum qed_rdma_type {
QED_RDMA_TYPE_ROCE, QED_RDMA_TYPE_ROCE,
QED_RDMA_TYPE_IWARP
}; };
struct qed_dev_rdma_info { struct qed_dev_rdma_info {
...@@ -575,6 +671,24 @@ struct qed_rdma_ops { ...@@ -575,6 +671,24 @@ struct qed_rdma_ops {
int (*ll2_set_mac_filter)(struct qed_dev *cdev, int (*ll2_set_mac_filter)(struct qed_dev *cdev,
u8 *old_mac_address, u8 *new_mac_address); u8 *old_mac_address, u8 *new_mac_address);
int (*iwarp_connect)(void *rdma_cxt,
struct qed_iwarp_connect_in *iparams,
struct qed_iwarp_connect_out *oparams);
int (*iwarp_create_listen)(void *rdma_cxt,
struct qed_iwarp_listen_in *iparams,
struct qed_iwarp_listen_out *oparams);
int (*iwarp_accept)(void *rdma_cxt,
struct qed_iwarp_accept_in *iparams);
int (*iwarp_reject)(void *rdma_cxt,
struct qed_iwarp_reject_in *iparams);
int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);
int (*iwarp_send_rtr)(void *rdma_cxt,
struct qed_iwarp_send_rtr_in *iparams);
}; };
const struct qed_rdma_ops *qed_get_rdma_ops(void); const struct qed_rdma_ops *qed_get_rdma_ops(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment