Commit 2da95be9 authored by David S. Miller

Merge branch 'qed-Add-iWARP-support-for-QL4xxxx'

Michal Kalderon says:

====================
qed: Add iWARP support for QL4xxxx

This patch series adds iWARP support to our QL4xxxx networking adapters.
The code changes span the qed and qedr drivers, but this series contains
changes to qed only. Once this series is accepted, the qedr series will
be submitted to the rdma tree.
There is one additional qed patch that enables iWARP; it is held back
until the qedr series is accepted.

The patches were previously sent as an RFC, and these are the first 12
patches in the RFC series:
https://www.spinics.net/lists/linux-rdma/msg51416.html

This series was tested and built against net-next.

The MAINTAINERS file is not updated in this series, as there is a pending
patch for the qedr driver update: https://patchwork.kernel.org/patch/9752761.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a5192c52 93c45984
@@ -5,6 +5,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
 	 qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
 qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
 qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
 qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
@@ -210,14 +210,16 @@ struct qed_tunn_update_params {

 /* The PCI personality is not quite synonymous to protocol ID:
  * 1. All personalities need CORE connections
- * 2. The Ethernet personality may support also the RoCE protocol
+ * 2. The Ethernet personality may support also the RoCE/iWARP protocol
  */
 enum qed_pci_personality {
 	QED_PCI_ETH,
 	QED_PCI_FCOE,
 	QED_PCI_ISCSI,
 	QED_PCI_ETH_ROCE,
-	QED_PCI_DEFAULT /* default in shmem */
+	QED_PCI_ETH_IWARP,
+	QED_PCI_ETH_RDMA,
+	QED_PCI_DEFAULT, /* default in shmem */
 };

 /* All VFs are symmetric, all counters are PF + all VFs */

@@ -277,6 +279,7 @@ enum qed_dev_cap {
 	QED_DEV_CAP_FCOE,
 	QED_DEV_CAP_ISCSI,
 	QED_DEV_CAP_ROCE,
+	QED_DEV_CAP_IWARP,
 };

 enum qed_wol_support {

@@ -286,7 +289,24 @@ enum qed_wol_support {
 struct qed_hw_info {
 	/* PCI personality */
 	enum qed_pci_personality personality;
+#define QED_IS_RDMA_PERSONALITY(dev)			    \
+	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||  \
+	 (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_ROCE_PERSONALITY(dev)			   \
+	((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_IWARP_PERSONALITY(dev)			    \
+	((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_L2_PERSONALITY(dev)		      \
+	((dev)->hw_info.personality == QED_PCI_ETH || \
+	 QED_IS_RDMA_PERSONALITY(dev))
+#define QED_IS_FCOE_PERSONALITY(dev) \
+	((dev)->hw_info.personality == QED_PCI_FCOE)
+#define QED_IS_ISCSI_PERSONALITY(dev) \
+	((dev)->hw_info.personality == QED_PCI_ISCSI)

 	/* Resource Allocation scheme results */
 	u32 resc_start[QED_MAX_RESC];

@@ -759,7 +779,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 }

 #define PURE_LB_TC 8
-#define OOO_LB_TC 9
+#define PKT_LB_TC 9

 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,

@@ -769,6 +789,8 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 int qed_device_num_engines(struct qed_dev *cdev);
 int qed_device_get_port_id(struct qed_dev *cdev);
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);

 #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
......
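Note (not part of the patch): the new QED_IS_*_PERSONALITY() macros encode two facts worth calling out — QED_PCI_ETH_RDMA counts as both RoCE- and iWARP-capable, and every RDMA personality is also an L2 personality. A minimal stand-alone C sketch, with stub types that only mirror the diff above:

#include <stdio.h>

enum qed_pci_personality {
	QED_PCI_ETH, QED_PCI_FCOE, QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE, QED_PCI_ETH_IWARP, QED_PCI_ETH_RDMA,
};

/* Stand-in for struct qed_hwfn: the macros only touch hw_info.personality */
struct dev_stub {
	struct { enum qed_pci_personality personality; } hw_info;
};

#define QED_IS_RDMA_PERSONALITY(dev)			    \
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||  \
	 (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_ROCE_PERSONALITY(dev)			   \
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_IWARP_PERSONALITY(dev)			    \
	((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_L2_PERSONALITY(dev)		      \
	((dev)->hw_info.personality == QED_PCI_ETH || \
	 QED_IS_RDMA_PERSONALITY(dev))

int main(void)
{
	static const char * const names[] = {
		"ETH", "FCOE", "ISCSI", "ETH_ROCE", "ETH_IWARP", "ETH_RDMA",
	};
	struct dev_stub d;

	/* Print the classification of each personality */
	for (int p = QED_PCI_ETH; p <= QED_PCI_ETH_RDMA; p++) {
		d.hw_info.personality = p;
		printf("%-9s l2=%d rdma=%d roce=%d iwarp=%d\n", names[p],
		       QED_IS_L2_PERSONALITY(&d), QED_IS_RDMA_PERSONALITY(&d),
		       QED_IS_ROCE_PERSONALITY(&d),
		       QED_IS_IWARP_PERSONALITY(&d));
	}
	return 0;
}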
@@ -246,14 +246,16 @@ struct qed_cxt_mngr {
 static bool src_proto(enum protocol_type type)
 {
 	return type == PROTOCOLID_ISCSI ||
-	       type == PROTOCOLID_FCOE;
+	       type == PROTOCOLID_FCOE ||
+	       type == PROTOCOLID_IWARP;
 }

 static bool tm_cid_proto(enum protocol_type type)
 {
 	return type == PROTOCOLID_ISCSI ||
 	       type == PROTOCOLID_FCOE ||
-	       type == PROTOCOLID_ROCE;
+	       type == PROTOCOLID_ROCE ||
+	       type == PROTOCOLID_IWARP;
 }

 static bool tm_tid_proto(enum protocol_type type)

@@ -853,7 +855,7 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
 	if (!excess_lines)
 		return 0;

-	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
 		return 0;

 	p_mngr = p_hwfn->p_cxt_mngr;

@@ -1033,7 +1035,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 	u32 lines, line, sz_left, lines_to_skip = 0;

 	/* Special handling for RoCE that supports dynamic allocation */
-	if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+	if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
 	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
 		return 0;

@@ -1833,7 +1835,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
 		tm_offset += tm_iids.pf_tids[i];
 	}

-	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+	if (QED_IS_RDMA_PERSONALITY(p_hwfn))
 		active_seg_mask = 0;

 	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);

@@ -2068,6 +2070,11 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
 	num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);

 	switch (p_hwfn->hw_info.personality) {
+	case QED_PCI_ETH_IWARP:
+		/* Each QP requires one connection */
+		num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
+		proto = PROTOCOLID_IWARP;
+		break;
 	case QED_PCI_ETH_ROCE:
 		num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
 		num_cons = num_qps * 2;	/* each QP requires two connections */

@@ -2103,6 +2110,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);

 	switch (p_hwfn->hw_info.personality) {
+	case QED_PCI_ETH_RDMA:
+	case QED_PCI_ETH_IWARP:
 	case QED_PCI_ETH_ROCE:
 	{
 		qed_rdma_set_pf_params(p_hwfn,

@@ -2344,7 +2353,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 			    last_cid_allocated - 1);

 	if (!p_hwfn->b_rdma_enabled_in_prs) {
-		/* Enable RoCE search */
+		/* Enable RDMA search */
 		qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
 		p_hwfn->b_rdma_enabled_in_prs = true;
 	}
......
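Note (not part of the patch): the connection accounting above differs per RDMA flavor — RoCE consumes two CIDs per QP (requester and responder), iWARP one. A stand-alone sketch of the resulting counts; the ROCE_MAX_QPS value is assumed (it is not shown in this diff), while IWARP_MAX_QPS comes from iwarp_common.h below:

#include <stdio.h>

#define ROCE_MAX_QPS	(32 * 1024)	/* assumed; not shown in this diff */
#define IWARP_MAX_QPS	(64 * 1024)	/* from iwarp_common.h */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int requested_qps = 40 * 1024;	/* made-up request */

	/* RoCE: each QP requires two connections (requester + responder) */
	unsigned int roce_qps = min_u32(ROCE_MAX_QPS, requested_qps);
	unsigned int roce_cons = roce_qps * 2;

	/* iWARP: each QP requires one connection */
	unsigned int iwarp_cons = min_u32(IWARP_MAX_QPS, requested_qps);

	printf("roce:  qps=%u cons=%u\n", roce_qps, roce_cons);
	printf("iwarp: qps=%u cons=%u\n", iwarp_cons, iwarp_cons);
	return 0;
}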
@@ -216,6 +216,10 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
 	case QED_PCI_ETH_ROCE:
 		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
 		break;
+	case QED_PCI_ETH_IWARP:
+		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
+		    PQ_FLAGS_OFLD;
+		break;
 	default:
 		DP_ERR(p_hwfn,
 		       "unknown personality %d\n", p_hwfn->hw_info.personality);

@@ -936,9 +940,16 @@ int qed_resc_alloc(struct qed_dev *cdev)
 		/* EQ */
 		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
-		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+			enum protocol_type rdma_proto;
+
+			if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+				rdma_proto = PROTOCOLID_ROCE;
+			else
+				rdma_proto = PROTOCOLID_IWARP;
+
 			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
-							       PROTOCOLID_ROCE,
+							       rdma_proto,
 							       NULL) * 2;
 			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
 		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {

@@ -2057,7 +2068,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 	qed_int_get_num_sbs(p_hwfn, &sb_cnt);

 	if (IS_ENABLED(CONFIG_QED_RDMA) &&
-	    p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+	    QED_IS_RDMA_PERSONALITY(p_hwfn)) {
 		/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
 		 * the status blocks equally between L2 / RoCE but with
 		 * consideration as to how many l2 queues / cnqs we have.

@@ -2068,9 +2079,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 		non_l2_sbs = feat_num[QED_RDMA_CNQ];
 	}

-	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
-	    p_hwfn->hw_info.personality == QED_PCI_ETH) {
+	if (QED_IS_L2_PERSONALITY(p_hwfn)) {
 		/* Start by allocating VF queues, then PF's */
 		feat_num[QED_VF_L2_QUE] = min_t(u32,
 						RESC_NUM(p_hwfn, QED_L2_QUEUE),

@@ -2083,12 +2092,12 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 						 QED_VF_L2_QUE));
 	}

-	if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+	if (QED_IS_FCOE_PERSONALITY(p_hwfn))
 		feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt,
 					      RESC_NUM(p_hwfn,
 						       QED_CMDQS_CQS));

-	if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
 		feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
 					       RESC_NUM(p_hwfn,
 							QED_CMDQS_CQS));

@@ -4122,3 +4131,14 @@ int qed_device_get_port_id(struct qed_dev *cdev)
 {
 	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
 }
+
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
+{
+	((u8 *)fw_msb)[0] = mac[1];
+	((u8 *)fw_msb)[1] = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lsb)[0] = mac[5];
+	((u8 *)fw_lsb)[1] = mac[4];
+}
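Note (not part of the patch): qed_set_fw_mac_addr() is moved out of qed_l2.c (see the removal further below) so that the iWARP code can reuse it. It packs the 6-byte MAC into three 16-bit little-endian words with each byte pair swapped, so the firmware reads the address in wire order. A stand-alone sketch using uint16_t in place of __le16:

#include <stdint.h>
#include <stdio.h>

static void set_fw_mac_addr(uint16_t *fw_msb, uint16_t *fw_mid,
			    uint16_t *fw_lsb, const uint8_t *mac)
{
	/* Same byte swizzle as the kernel function above */
	((uint8_t *)fw_msb)[0] = mac[1];
	((uint8_t *)fw_msb)[1] = mac[0];
	((uint8_t *)fw_mid)[0] = mac[3];
	((uint8_t *)fw_mid)[1] = mac[2];
	((uint8_t *)fw_lsb)[0] = mac[5];
	((uint8_t *)fw_lsb)[1] = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t msb, mid, lsb;

	set_fw_mac_addr(&msb, &mid, &lsb, mac);
	/* On a little-endian host this prints 0011 2233 4455:
	 * byte-swapping each LE word yields the MAC in wire order.
	 */
	printf("%04x %04x %04x\n", msb, mid, lsb);
	return 0;
}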
@@ -46,6 +46,7 @@
 #include <linux/qed/fcoe_common.h>
 #include <linux/qed/eth_common.h>
 #include <linux/qed/iscsi_common.h>
+#include <linux/qed/iwarp_common.h>
 #include <linux/qed/rdma_common.h>
 #include <linux/qed/roce_common.h>
 #include <linux/qed/qed_fcoe_if.h>
......
This diff is collapsed.
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _QED_IWARP_H
#define _QED_IWARP_H
enum qed_iwarp_qp_state {
QED_IWARP_QP_STATE_IDLE,
QED_IWARP_QP_STATE_RTS,
QED_IWARP_QP_STATE_TERMINATE,
QED_IWARP_QP_STATE_CLOSING,
QED_IWARP_QP_STATE_ERROR,
};
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
#define QED_IWARP_PREALLOC_CNT (256)
#define QED_IWARP_LL2_SYN_TX_SIZE (128)
#define QED_IWARP_LL2_SYN_RX_SIZE (256)
#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
#define QED_IWARP_HANDLE_INVAL (0xff)
struct qed_iwarp_ll2_buff {
void *data;
dma_addr_t data_phys_addr;
u32 buff_size;
};
struct qed_iwarp_info {
struct list_head listen_list; /* qed_iwarp_listener */
struct list_head ep_list; /* qed_iwarp_ep */
struct list_head ep_free_list; /* pre-allocated ep's */
spinlock_t iw_lock; /* for iwarp resources */
spinlock_t qp_lock; /* for teardown races */
u32 rcv_wnd_scale;
u16 max_mtu;
u8 mac_addr[ETH_ALEN];
u8 crc_needed;
u8 tcp_flags;
u8 ll2_syn_handle;
u8 peer2peer;
enum mpa_negotiation_mode mpa_rev;
enum mpa_rtr_type rtr_type;
};
enum qed_iwarp_ep_state {
QED_IWARP_EP_INIT,
QED_IWARP_EP_MPA_REQ_RCVD,
QED_IWARP_EP_MPA_OFFLOADED,
QED_IWARP_EP_ESTABLISHED,
QED_IWARP_EP_CLOSED
};
union async_output {
struct iwarp_eqe_data_mpa_async_completion mpa_response;
struct iwarp_eqe_data_tcp_async_completion mpa_request;
};
#define QED_MAX_PRIV_DATA_LEN (512)
struct qed_iwarp_ep_memory {
u8 in_pdata[QED_MAX_PRIV_DATA_LEN];
u8 out_pdata[QED_MAX_PRIV_DATA_LEN];
union async_output async_output;
};
/* Endpoint structure represents a TCP connection. This connection can be
* associated with a QP or not (in which case QP==NULL)
*/
struct qed_iwarp_ep {
struct list_head list_entry;
struct qed_rdma_qp *qp;
struct qed_iwarp_ep_memory *ep_buffer_virt;
dma_addr_t ep_buffer_phys;
enum qed_iwarp_ep_state state;
int sig;
struct qed_iwarp_cm_info cm_info;
enum tcp_connect_mode connect_mode;
enum mpa_rtr_type rtr_type;
enum mpa_negotiation_mode mpa_rev;
u32 tcp_cid;
u32 cid;
u16 mss;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
bool mpa_reply_processed;
/* For Passive side - syn packet related data */
u16 syn_ip_payload_length;
struct qed_iwarp_ll2_buff *syn;
dma_addr_t syn_phy_addr;
/* The event_cb function is called for asynchronous events associated
 * with the ep. It is initialized at different entry points depending
 * on whether the ep is the TCP connection's active side or passive side.
 * The cb_context is passed to the event_cb function.
 */
iwarp_event_handler event_cb;
void *cb_context;
};
struct qed_iwarp_listener {
struct list_head list_entry;
/* The event_cb function is called for connection requests.
* The cb_context is passed to the event_cb function.
*/
iwarp_event_handler event_cb;
void *cb_context;
u32 max_backlog;
u32 ip_addr[4];
u16 port;
u16 vlan;
u8 ip_version;
};
int qed_iwarp_alloc(struct qed_hwfn *p_hwfn);
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params);
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn);
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn);
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
struct qed_rdma_create_qp_out_params *out_params);
int qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp,
enum qed_iwarp_qp_state new_state, bool internal);
int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params);
int
qed_iwarp_connect(void *rdma_cxt,
struct qed_iwarp_connect_in *iparams,
struct qed_iwarp_connect_out *oparams);
int
qed_iwarp_create_listen(void *rdma_cxt,
struct qed_iwarp_listen_in *iparams,
struct qed_iwarp_listen_out *oparams);
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams);
int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams);
int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle);
int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams);
#endif
@@ -79,8 +79,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
 	unsigned long **pp_qids;
 	u32 i;

-	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
-	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+	if (!QED_IS_L2_PERSONALITY(p_hwfn))
 		return 0;

 	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);

@@ -1228,19 +1227,6 @@ static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
 	return action;
 }

-static void qed_set_fw_mac_addr(__le16 *fw_msb,
-				__le16 *fw_mid,
-				__le16 *fw_lsb,
-				u8 *mac)
-{
-	((u8 *)fw_msb)[0] = mac[1];
-	((u8 *)fw_msb)[1] = mac[0];
-	((u8 *)fw_mid)[0] = mac[3];
-	((u8 *)fw_mid)[1] = mac[2];
-	((u8 *)fw_lsb)[0] = mac[5];
-	((u8 *)fw_lsb)[1] = mac[4];
-}
-
 static int
 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
 			u16 opaque_fid,
......
@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		b_last_packet = list_empty(&p_tx->active_descq);
 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
 			struct qed_ooo_buffer *p_buffer;

 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;

@@ -532,7 +532,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

-		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
 			struct qed_ooo_buffer *p_buffer;

 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;

@@ -893,11 +893,11 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
 	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
-	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
-									  : 1;
+	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;

 	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
-	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
+	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
+	    (conn_type != QED_LL2_TYPE_IWARP)) {
 		p_ramrod->mf_si_bcast_accept_all = 1;
 		p_ramrod->mf_si_mcast_accept_all = 1;
 	} else {

@@ -924,7 +924,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
 		return 0;

-	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
 		p_ll2_conn->tx_stats_en = 0;
 	else
 		p_ll2_conn->tx_stats_en = 1;

@@ -955,10 +955,10 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

 	switch (p_ll2_conn->input.tx_tc) {
-	case LB_TC:
+	case PURE_LB_TC:
 		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
 		break;
-	case OOO_LB_TC:
+	case PKT_LB_TC:
 		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
 		break;
 	default:

@@ -973,12 +973,20 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 		p_ramrod->conn_type = PROTOCOLID_FCOE;
 		break;
 	case QED_LL2_TYPE_ISCSI:
-	case QED_LL2_TYPE_ISCSI_OOO:
 		p_ramrod->conn_type = PROTOCOLID_ISCSI;
 		break;
 	case QED_LL2_TYPE_ROCE:
 		p_ramrod->conn_type = PROTOCOLID_ROCE;
 		break;
+	case QED_LL2_TYPE_IWARP:
+		p_ramrod->conn_type = PROTOCOLID_IWARP;
+		break;
+	case QED_LL2_TYPE_OOO:
+		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+			p_ramrod->conn_type = PROTOCOLID_ISCSI;
+		else
+			p_ramrod->conn_type = PROTOCOLID_IWARP;
+		break;
 	default:
 		p_ramrod->conn_type = PROTOCOLID_ETH;
 		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);

@@ -1142,7 +1150,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
 	u16 buf_idx;
 	int rc = 0;

-	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
 		return rc;

 	/* Correct number of requested OOO buffers if needed */

@@ -1280,7 +1288,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
 		goto q_allocate_fail;

 	/* Register callbacks for the Rx/Tx queues */
-	if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
 		comp_rx_cb = qed_ll2_lb_rxq_completion;
 		comp_tx_cb = qed_ll2_lb_txq_completion;
 	} else {

@@ -1339,7 +1347,7 @@ static void
 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
 				 struct qed_ll2_info *p_ll2_conn)
 {
-	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
 		return;

 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

@@ -1421,7 +1429,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
 	if (rc)
 		goto out;

-	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
 		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

 	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

@@ -1794,7 +1802,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
 	}

-	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
 		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

 	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {

@@ -1816,7 +1824,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
 {
 	struct qed_ooo_buffer *p_buffer;

-	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
 		return;

 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

@@ -2063,7 +2071,7 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev,
 	ll2_cbs.cookie = QED_LEADING_HWFN(cdev);

 	if (lb) {
-		data->input.tx_tc = OOO_LB_TC;
+		data->input.tx_tc = PKT_LB_TC;
 		data->input.tx_dest = QED_LL2_TX_DEST_LB;
 	} else {
 		data->input.tx_tc = 0;

@@ -2080,7 +2088,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
 	int rc;

 	qed_ll2_set_conn_data(cdev, &data, params,
-			      QED_LL2_TYPE_ISCSI_OOO, handle, true);
+			      QED_LL2_TYPE_OOO, handle, true);

 	rc = qed_ll2_acquire_connection(hwfn, &data);
 	if (rc) {
......
@@ -237,6 +237,8 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
 int qed_fill_dev_info(struct qed_dev *cdev,
 		      struct qed_dev_info *dev_info)
 {
+	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
 	struct qed_tunnel_info *tun = &cdev->tunnel;
 	struct qed_ptt *ptt;

@@ -260,11 +262,10 @@ int qed_fill_dev_info(struct qed_dev *cdev,
 	dev_info->pci_mem_start = cdev->pci_params.mem_start;
 	dev_info->pci_mem_end = cdev->pci_params.mem_end;
 	dev_info->pci_irq = cdev->pci_params.irq;
-	dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
-				    QED_PCI_ETH_ROCE);
+	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
 	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
 	dev_info->dev_type = cdev->type;
-	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

 	if (IS_PF(cdev)) {
 		dev_info->fw_major = FW_MAJOR_VERSION;

@@ -274,8 +275,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
 		dev_info->mf_mode = cdev->mf_mode;
 		dev_info->tx_switching = true;

-		if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
-		    QED_WOL_SUPPORT_PME)
+		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
 			dev_info->wol_support = true;

 		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;

@@ -304,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
 					      &dev_info->mfw_rev, NULL);
 	}

-	dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;
+	dev_info->mtu = hw_info->mtu;

 	return 0;
 }

@@ -790,7 +790,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 				  cdev->num_hwfns;

 	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
-	    QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
+	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
 		return 0;

 	for_each_hwfn(cdev, i)

@@ -931,8 +931,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 	/* In case we might support RDMA, don't allow qede to be greedy
 	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
 	 */
-	if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
-	    QED_PCI_ETH_ROCE) {
+	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
 		u16 *num_cons;

 		num_cons = &params->eth_pf_params.num_cons;
......
@@ -161,7 +161,10 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
 					       NULL);

-	p_rdma_info->num_qps = num_cons / 2;
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+		p_rdma_info->num_qps = num_cons;
+	else
+		p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */

 	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

@@ -252,6 +255,13 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 			  "Failed to allocate real cid bitmap, rc = %d\n", rc);
 		goto free_cid_map;
 	}

+	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+		rc = qed_iwarp_alloc(p_hwfn);
+
+	if (rc)
+		goto free_cid_map;
+
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
 	return 0;

@@ -329,6 +339,9 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 {
 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

+	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+		qed_iwarp_resc_free(p_hwfn);
+
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
 	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);

@@ -470,6 +483,9 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
 	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
 		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+		qed_iwarp_init_devinfo(p_hwfn);
 }

 static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)

@@ -490,29 +506,17 @@ static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
 static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	u32 ll2_ethertype_en;
+	int rc = 0;

 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");

 	p_hwfn->b_rdma_enabled_in_prs = false;

-	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
-
-	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
-
-	/* We delay writing to this reg until first cid is allocated. See
-	 * qed_cxt_dynamic_ilt_alloc function for more details
-	 */
-	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
-	       (ll2_ethertype_en | 0x01));
-
-	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
-		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
-		return -EINVAL;
-	}
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+		qed_iwarp_init_hw(p_hwfn, p_ptt);
+	else
+		rc = qed_roce_init_hw(p_hwfn, p_ptt);

-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
-	return 0;
+	return rc;
 }

 static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,

@@ -544,7 +548,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;

-	p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
+	else
+		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

 	p_params_header = &p_ramrod->params_header;
 	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,

@@ -641,7 +648,15 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;

-	qed_roce_setup(p_hwfn);
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+		rc = qed_iwarp_setup(p_hwfn, p_ptt, params);
+		if (rc)
+			return rc;
+	} else {
+		rc = qed_roce_setup(p_hwfn);
+		if (rc)
+			return rc;
+	}

 	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
 }

@@ -675,7 +690,16 @@ int qed_rdma_stop(void *rdma_cxt)
 	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
 	       (ll2_ethertype_en & 0xFFFE));

-	qed_roce_stop(p_hwfn);
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+		rc = qed_iwarp_stop(p_hwfn, p_ptt);
+		if (rc) {
+			qed_ptt_release(p_hwfn, p_ptt);
+			return rc;
+		}
+	} else {
+		qed_roce_stop(p_hwfn);
+	}
+
 	qed_ptt_release(p_hwfn, p_ptt);

 	/* Get SPQ entry */

@@ -810,7 +834,9 @@ static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
 	memset(info, 0, sizeof(*info));

-	info->rdma_type = QED_RDMA_TYPE_ROCE;
+	info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
+	    QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;
+
 	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

 	qed_fill_dev_info(cdev, &info->common);

@@ -1112,7 +1138,7 @@ static int qed_rdma_query_qp(void *rdma_cxt,
 			     struct qed_rdma_query_qp_out_params *out_params)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-	int rc;
+	int rc = 0;

 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

@@ -1138,7 +1164,10 @@ static int qed_rdma_query_qp(void *rdma_cxt,
 	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
 	out_params->sqd_async = qp->sqd_async;

-	rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+		qed_iwarp_query_qp(qp, out_params);
+	else
+		rc = qed_roce_query_qp(p_hwfn, qp, out_params);

 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
 	return rc;

@@ -1151,7 +1180,10 @@ static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

-	rc = qed_roce_destroy_qp(p_hwfn, qp);
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+		rc = qed_iwarp_destroy_qp(p_hwfn, qp);
+	else
+		rc = qed_roce_destroy_qp(p_hwfn, qp);

 	/* free qp params struct */
 	kfree(qp);

@@ -1190,20 +1222,27 @@ qed_rdma_create_qp(void *rdma_cxt,
 		return NULL;
 	}

+	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+		if (in_params->sq_num_pages * sizeof(struct regpair) >
+		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
+			DP_NOTICE(p_hwfn->cdev,
+				  "Sq num pages: %d exceeds maximum\n",
+				  in_params->sq_num_pages);
+			return NULL;
+		}
+		if (in_params->rq_num_pages * sizeof(struct regpair) >
+		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
+			DP_NOTICE(p_hwfn->cdev,
+				  "Rq num pages: %d exceeds maximum\n",
+				  in_params->rq_num_pages);
+			return NULL;
+		}
+	}
+
 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 	if (!qp)
 		return NULL;

-	rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
-	qp->qpid = ((0xFF << 16) | qp->icid);
-
-	DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
-
-	if (rc) {
-		kfree(qp);
-		return NULL;
-	}
-
 	qp->cur_state = QED_ROCE_QP_STATE_RESET;
 	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
 	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);

@@ -1226,6 +1265,19 @@ qed_rdma_create_qp(void *rdma_cxt,
 	qp->e2e_flow_control_en = qp->use_srq ? false : true;
 	qp->stats_queue = in_params->stats_queue;

+	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
+		qp->qpid = qp->icid;
+	} else {
+		rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+		qp->qpid = ((0xFF << 16) | qp->icid);
+	}
+
+	if (rc) {
+		kfree(qp);
+		return NULL;
+	}
+
 	out_params->icid = qp->icid;
 	out_params->qp_id = qp->qpid;

@@ -1324,7 +1376,14 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
 			   qp->cur_state);
 	}

-	rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+		enum qed_iwarp_qp_state new_state =
+		    qed_roce2iwarp_state(qp->cur_state);
+
+		rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
+	} else {
+		rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+	}

 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
 	return rc;

@@ -1713,6 +1772,12 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
 	.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
 	.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
 	.ll2_get_stats = &qed_ll2_get_stats,
+	.iwarp_connect = &qed_iwarp_connect,
+	.iwarp_create_listen = &qed_iwarp_create_listen,
+	.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
+	.iwarp_accept = &qed_iwarp_accept,
+	.iwarp_reject = &qed_iwarp_reject,
+	.iwarp_send_rtr = &qed_iwarp_send_rtr,
 };

 const struct qed_rdma_ops *qed_get_rdma_ops(void)
......
@@ -42,6 +42,7 @@
 #include "qed.h"
 #include "qed_dev_api.h"
 #include "qed_hsi.h"
+#include "qed_iwarp.h"
 #include "qed_roce.h"

 #define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)

@@ -84,6 +85,7 @@ struct qed_rdma_info {
 	struct qed_bmap qp_map;
 	struct qed_bmap srq_map;
 	struct qed_bmap cid_map;
+	struct qed_bmap tcp_cid_map;
 	struct qed_bmap real_cid_map;
 	struct qed_bmap dpi_map;
 	struct qed_bmap toggle_bits;

@@ -97,6 +99,7 @@ struct qed_rdma_info {
 	u16 queue_zone_base;
 	u16 max_queue_zones;
 	enum protocol_type proto;
+	struct qed_iwarp_info iwarp;
 };

 struct qed_rdma_qp {

@@ -105,6 +108,7 @@ struct qed_rdma_qp {
 	u32 qpid;
 	u16 icid;
 	enum qed_roce_qp_state cur_state;
+	enum qed_iwarp_qp_state iwarp_state;
 	bool use_srq;
 	bool signal_all;
 	bool fmr_and_reserved_lkey;

@@ -164,6 +168,7 @@ struct qed_rdma_qp {
 	void *shared_queue;
 	dma_addr_t shared_queue_phys_addr;
+	struct qed_iwarp_ep *ep;
 };

 #if IS_ENABLED(CONFIG_QED_RDMA)
......
@@ -1149,3 +1149,23 @@ int qed_roce_setup(struct qed_hwfn *p_hwfn)
 				  qed_roce_async_event);
 }
+
+int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	u32 ll2_ethertype_en;
+
+	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
+	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+	       (ll2_ethertype_en | 0x01));
+
+	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+	return 0;
+}
@@ -104,12 +104,17 @@ union ramrod_data {
 	struct roce_query_qp_req_ramrod_data roce_query_qp_req;
 	struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
 	struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
+	struct roce_init_func_ramrod_data roce_init_func;
 	struct rdma_create_cq_ramrod_data rdma_create_cq;
 	struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
 	struct rdma_srq_create_ramrod_data rdma_create_srq;
 	struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
 	struct rdma_srq_modify_ramrod_data rdma_modify_srq;
-	struct roce_init_func_ramrod_data roce_init_func;
+	struct iwarp_create_qp_ramrod_data iwarp_create_qp;
+	struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
+	struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
+	struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
+	struct iwarp_init_func_ramrod_data iwarp_init_func;
 	struct fcoe_init_ramrod_params fcoe_init;
 	struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
 	struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
......
@@ -38,6 +38,8 @@
 #include <linux/slab.h>

 /* dma_addr_t manip */
+#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16))
 #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x))
 #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x))
 #define DMA_REGPAIR_LE(x, val) do { \

@@ -778,7 +780,7 @@ enum protocol_type {
 	PROTOCOLID_ROCE,
 	PROTOCOLID_CORE,
 	PROTOCOLID_ETH,
-	PROTOCOLID_RESERVED4,
+	PROTOCOLID_IWARP,
 	PROTOCOLID_RESERVED5,
 	PROTOCOLID_PREROCE,
 	PROTOCOLID_COMMON,
......
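Note (not part of the patch): the new PTR_HI() extracts the upper half with two 16-bit shifts rather than a single ">> 32" so the expression stays well-defined when uintptr_t is only 32 bits wide (shifting a 32-bit value by its full width is undefined behaviour in C). A stand-alone sketch with uint32_t in place of u32:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PTR_LO(x) ((uint32_t)(((uintptr_t)(x)) & 0xffffffff))
#define PTR_HI(x) ((uint32_t)((((uintptr_t)(x)) >> 16) >> 16))

int main(void)
{
	/* A made-up 64-bit address; on a 32-bit build PTR_HI() simply
	 * evaluates to 0 instead of invoking undefined behaviour.
	 */
	uintptr_t addr = (uintptr_t)0x12345678abcdef00ULL;

	printf("hi=0x%08" PRIx32 " lo=0x%08" PRIx32 "\n",
	       PTR_HI(addr), PTR_LO(addr));
	return 0;
}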
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__
#include <linux/qed/rdma_common.h>
/************************/
/* IWARP FW CONSTANTS */
/************************/
#define IWARP_ACTIVE_MODE 0
#define IWARP_PASSIVE_MODE 1
#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)
#define IWARP_MAX_QPS (64 * 1024)
#endif /* __IWARP_COMMON__ */
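Note (not part of the patch): these constants describe one 32 KB shared queue page per QP, with the RQ PBL at offset 0x4000 (up to 4 KB) and the SQ PBL at offset 0x5000 (up to 12 KB); the qed_rdma_create_qp() hunk above rejects QPs whose PBLs would not fit in these regions. A compile-time sanity sketch of the layout (C11 static_assert):

#include <assert.h>

#define IWARP_SHARED_QUEUE_PAGE_SIZE		(0x8000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET	(0x4000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE	(0x1000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET	(0x5000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE	(0x3000)

/* The RQ PBL region must end before the SQ PBL region begins... */
static_assert(IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET +
	      IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE <=
	      IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET,
	      "RQ PBL overlaps SQ PBL");

/* ...and the SQ PBL region must end exactly at the page boundary. */
static_assert(IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET +
	      IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE ==
	      IWARP_SHARED_QUEUE_PAGE_SIZE,
	      "SQ PBL does not end at the page boundary");

int main(void) { return 0; }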
@@ -47,9 +47,10 @@ enum qed_ll2_conn_type {
 	QED_LL2_TYPE_FCOE,
 	QED_LL2_TYPE_ISCSI,
 	QED_LL2_TYPE_TEST,
-	QED_LL2_TYPE_ISCSI_OOO,
+	QED_LL2_TYPE_OOO,
 	QED_LL2_TYPE_RESERVED2,
 	QED_LL2_TYPE_ROCE,
+	QED_LL2_TYPE_IWARP,
 	QED_LL2_TYPE_RESERVED3,
 	MAX_QED_LL2_RX_CONN_TYPE
 };
......
@@ -470,6 +470,101 @@ struct qed_rdma_counters_out_params {
 #define QED_ROCE_TX_HEAD_FAILURE (1)
 #define QED_ROCE_TX_FRAG_FAILURE (2)

+enum qed_iwarp_event_type {
+	QED_IWARP_EVENT_MPA_REQUEST,	  /* Passive side request received */
+	QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */
+	QED_IWARP_EVENT_ACTIVE_COMPLETE,  /* Active side reply received */
+	QED_IWARP_EVENT_DISCONNECT,
+	QED_IWARP_EVENT_CLOSE,
+	QED_IWARP_EVENT_IRQ_FULL,
+	QED_IWARP_EVENT_RQ_EMPTY,
+	QED_IWARP_EVENT_LLP_TIMEOUT,
+	QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
+	QED_IWARP_EVENT_CQ_OVERFLOW,
+	QED_IWARP_EVENT_QP_CATASTROPHIC,
+	QED_IWARP_EVENT_ACTIVE_MPA_REPLY,
+	QED_IWARP_EVENT_LOCAL_ACCESS_ERROR,
+	QED_IWARP_EVENT_REMOTE_OPERATION_ERROR,
+	QED_IWARP_EVENT_TERMINATE_RECEIVED
+};
+
+enum qed_tcp_ip_version {
+	QED_TCP_IPV4,
+	QED_TCP_IPV6,
+};
+
+struct qed_iwarp_cm_info {
+	enum qed_tcp_ip_version ip_version;
+	u32 remote_ip[4];
+	u32 local_ip[4];
+	u16 remote_port;
+	u16 local_port;
+	u16 vlan;
+	u8 ord;
+	u8 ird;
+	u16 private_data_len;
+	const void *private_data;
+};
+
+struct qed_iwarp_cm_event_params {
+	enum qed_iwarp_event_type event;
+	const struct qed_iwarp_cm_info *cm_info;
+	void *ep_context;	/* To be passed to accept call */
+	int status;
+};
+
+typedef int (*iwarp_event_handler) (void *context,
+				    struct qed_iwarp_cm_event_params *event);
+
+struct qed_iwarp_connect_in {
+	iwarp_event_handler event_cb;
+	void *cb_context;
+	struct qed_rdma_qp *qp;
+	struct qed_iwarp_cm_info cm_info;
+	u16 mss;
+	u8 remote_mac_addr[ETH_ALEN];
+	u8 local_mac_addr[ETH_ALEN];
+};
+
+struct qed_iwarp_connect_out {
+	void *ep_context;
+};
+
+struct qed_iwarp_listen_in {
+	iwarp_event_handler event_cb;
+	void *cb_context;	/* passed to event_cb */
+	u32 max_backlog;
+	enum qed_tcp_ip_version ip_version;
+	u32 ip_addr[4];
+	u16 port;
+	u16 vlan;
+};
+
+struct qed_iwarp_listen_out {
+	void *handle;
+};
+
+struct qed_iwarp_accept_in {
+	void *ep_context;
+	void *cb_context;
+	struct qed_rdma_qp *qp;
+	const void *private_data;
+	u16 private_data_len;
+	u8 ord;
+	u8 ird;
+};
+
+struct qed_iwarp_reject_in {
+	void *ep_context;
+	void *cb_context;
+	const void *private_data;
+	u16 private_data_len;
+};
+
+struct qed_iwarp_send_rtr_in {
+	void *ep_context;
+};
+
 struct qed_roce_ll2_header {
 	void *vaddr;
 	dma_addr_t baddr;

@@ -491,6 +586,7 @@ struct qed_roce_ll2_packet {
 enum qed_rdma_type {
 	QED_RDMA_TYPE_ROCE,
+	QED_RDMA_TYPE_IWARP
 };

 struct qed_dev_rdma_info {

@@ -575,6 +671,24 @@ struct qed_rdma_ops {
 	int (*ll2_set_mac_filter)(struct qed_dev *cdev,
 				  u8 *old_mac_address, u8 *new_mac_address);

+	int (*iwarp_connect)(void *rdma_cxt,
+			     struct qed_iwarp_connect_in *iparams,
+			     struct qed_iwarp_connect_out *oparams);
+
+	int (*iwarp_create_listen)(void *rdma_cxt,
+				   struct qed_iwarp_listen_in *iparams,
+				   struct qed_iwarp_listen_out *oparams);
+
+	int (*iwarp_accept)(void *rdma_cxt,
+			    struct qed_iwarp_accept_in *iparams);
+
+	int (*iwarp_reject)(void *rdma_cxt,
+			    struct qed_iwarp_reject_in *iparams);
+
+	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);
+
+	int (*iwarp_send_rtr)(void *rdma_cxt,
+			      struct qed_iwarp_send_rtr_in *iparams);
 };

 const struct qed_rdma_ops *qed_get_rdma_ops(void);
......
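Note (not part of the patch, and not qedr's actual code — that series lands separately): a hypothetical sketch of how an upper-layer driver could create an iWARP listener through the new ops. The callback body, port, and backlog values are made up; the types and signatures are the ones added above:

#include <linux/qed/qed_rdma_if.h>

/* Hypothetical event handler: invoked with QED_IWARP_EVENT_MPA_REQUEST
 * and friends; a real consumer would dispatch to its connection-manager
 * logic and eventually call iwarp_accept() or iwarp_reject() with
 * params->ep_context.
 */
static int example_cm_event_cb(void *context,
			       struct qed_iwarp_cm_event_params *params)
{
	return 0;
}

static int example_create_listener(void *rdma_cxt, void **handle)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_iwarp_listen_out oparams;
	struct qed_iwarp_listen_in iparams = {
		.event_cb = example_cm_event_cb,
		.cb_context = NULL,	/* passed back to event_cb */
		.max_backlog = 128,	/* made-up value */
		.ip_version = QED_TCP_IPV4,
		.ip_addr = { 0 },	/* made-up: listen on 0.0.0.0 */
		.port = 4321,		/* made-up port */
		.vlan = 0,
	};
	int rc;

	rc = ops->iwarp_create_listen(rdma_cxt, &iparams, &oparams);
	if (rc)
		return rc;

	/* Handle is later released via ops->iwarp_destroy_listen() */
	*handle = oparams.handle;
	return 0;
}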