Commit 76684ab8 authored by Shai Malin, committed by David S. Miller

qed: Add NVMeTCP Offload Connection Level FW and HW HSI

This patch introduces the NVMeTCP connection-level HSI and the HSI
functionality required to initialize and interact with the HW device at
the connection level.

This includes:
- Connection offload: offload a TCP connection to the FW.
- Connection update: update the ICReq-ICResp negotiated parameters.
- Connection clear SQ: flush all outstanding IOs from the FW.
- Connection termination: terminate the TCP connection and flush the FW.

Acked-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Ariel Elior <aelior@marvell.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 897e87a1
@@ -19,6 +19,7 @@
#define QED_NVMETCP_FW_CQ_SIZE (4 * 1024)

/* tcp parameters */
#define QED_TCP_FLOW_LABEL 0
#define QED_TCP_TWO_MSL_TIMER 4000
#define QED_TCP_HALF_WAY_CLOSE_TIMEOUT 10
#define QED_TCP_MAX_FIN_RT 2
@@ -32,6 +33,57 @@ struct qed_nvmetcp_info {
	nvmetcp_event_cb_t event_cb;
};

struct qed_hash_nvmetcp_con {
	struct hlist_node node;
	struct qed_nvmetcp_conn *con;
};

struct qed_nvmetcp_conn {
	struct list_head list_entry;
	bool free_on_delete;
	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;
	u8 offl_flags;
	u8 connect_mode;
	dma_addr_t sq_pbl_addr;
	struct qed_chain r2tq;
	struct qed_chain xhq;
	struct qed_chain uhq;
	u8 local_mac[6];
	u8 remote_mac[6];
	u8 ip_version;
	u8 ka_max_probe_cnt;
	u16 vlan_id;
	u16 tcp_flags;
	u32 remote_ip[4];
	u32 local_ip[4];
	u32 flow_label;
	u32 ka_timeout;
	u32 ka_interval;
	u32 max_rt_time;
	u8 ttl;
	u8 tos_or_tc;
	u16 remote_port;
	u16 local_port;
	u16 mss;
	u8 rcv_wnd_scale;
	u32 rcv_wnd;
	u32 cwnd;
	u8 update_flag;
	u8 default_cq;
	u8 abortive_dsconnect;
	u32 max_seq_size;
	u32 max_recv_pdu_length;
	u32 max_send_pdu_length;
	u32 first_seq_length;
	u16 physical_q0;
	u16 physical_q1;
	u16 nvmetcp_cccid_max_range;
	dma_addr_t nvmetcp_cccid_itid_table_addr;
};
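
Illustrative sketch (not part of the patch): struct qed_hash_nvmetcp_con lets the qed core track offloaded connections in a hashtable keyed by the connection handle, mirroring the existing qed iSCSI/FCoE code. A minimal lookup sketch, assuming a cdev->connections hashtable and the QED_FLAG_STORAGE_STARTED flag used by the other storage personalities; the helper name is hypothetical.

/* Hypothetical helper, modeled on qed_iscsi_get_hash(); everything except
 * struct qed_hash_nvmetcp_con itself is an assumption for illustration.
 */
#include <linux/hashtable.h>

static struct qed_hash_nvmetcp_con *
example_nvmetcp_get_hash(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_nvmetcp_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	/* The handle handed out by acquire_conn() is used as the hash key */
	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			return hash_con;
	}

	return NULL;
}
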
#if IS_ENABLED(CONFIG_QED_NVMETCP)
int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn);
void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn);
...
@@ -101,6 +101,9 @@ union ramrod_data {
	struct iscsi_spe_conn_termination iscsi_conn_terminate;
	struct nvmetcp_init_ramrod_params nvmetcp_init;
	struct nvmetcp_spe_conn_offload nvmetcp_conn_offload;
	struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update;
	struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate;
	struct vf_start_ramrod_data vf_start;
	struct vf_stop_ramrod_data vf_stop;
...
@@ -6,6 +6,8 @@
#include "tcp_common.h"

#define NVMETCP_SLOW_PATH_LAYER_CODE (6)

/* NVMeTCP firmware function init parameters */
struct nvmetcp_spe_func_init {
	__le16 half_way_close_timeout;
@@ -43,6 +45,10 @@ enum nvmetcp_ramrod_cmd_id {
	NVMETCP_RAMROD_CMD_ID_UNUSED = 0,
	NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1,
	NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2,
	NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
	NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4,
	NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5,
	NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6,
	MAX_NVMETCP_RAMROD_CMD_ID
};
@@ -51,4 +57,141 @@ struct nvmetcp_glbl_queue_entry {
	struct regpair reserved;
};

/* NVMeTCP conn level EQEs */
enum nvmetcp_eqe_opcode {
	NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */
	NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */
	NVMETCP_EVENT_TYPE_OFFLOAD_CONN, /* Response after option 2 offload Ramrod */
	NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */
	NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */
	NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */
	NVMETCP_EVENT_TYPE_RESERVED0,
	NVMETCP_EVENT_TYPE_RESERVED1,
	NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */
	NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */
	NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */
	NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */
	NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */
	NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */
	MAX_NVMETCP_EQE_OPCODE
};
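
Illustrative sketch (not part of the patch): a consumer of these EQEs dispatches on the opcode inside the event callback it registered through the start op. A minimal sketch, assuming the nvmetcp_event_cb_t signature (context, fw_event_code, fw_handle); the example_* context type and helpers are placeholders.

/* Hypothetical async-event dispatcher; only the opcodes come from the HSI
 * above, everything named example_* is a stand-in.
 */
struct example_conn_ctx { u32 handle; };	/* placeholder context */

static void example_connect_done(struct example_conn_ctx *ctx) { }
static void example_terminate_done(struct example_conn_ctx *ctx) { }
static void example_conn_error(struct example_conn_ctx *ctx, u8 code) { }

static int example_async_event_cb(void *context, u8 fw_event_code,
				  void *fw_handle)
{
	struct example_conn_ctx *ctx = context;

	switch (fw_event_code) {
	case NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
		/* TCP 3-way handshake done; ICReq/ICResp can be exchanged */
		example_connect_done(ctx);
		break;
	case NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE:
		/* FW finished flushing the connection; safe to release it */
		example_terminate_done(ctx);
		break;
	case NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD:	/* peer sent RST */
	case NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD:	/* peer sent FIN */
	case NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2:
	case NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR:
	case NVMETCP_EVENT_TYPE_TCP_CONN_ERROR:
		/* opcodes past START_OF_ERROR_TYPES are treated as fatal here */
		example_conn_error(ctx, fw_event_code);
		break;
	default:
		break;
	}

	return 0;
}
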
struct nvmetcp_conn_offload_section {
	struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */
	__le16 cccid_max_range; /* CCCID max value - used for validation */
	__le16 reserved[3];
};

/* NVMe TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */
struct nvmetcp_conn_offload_params {
	struct regpair sq_pbl_addr;
	struct regpair r2tq_pbl_addr;
	struct regpair xhq_pbl_addr;
	struct regpair uhq_pbl_addr;
	__le16 physical_q0;
	__le16 physical_q1;
	u8 flags;
#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1
#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF
#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4
	u8 default_cq;
	__le16 reserved0;
	__le32 reserved1;
	__le32 initial_ack;
	struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */
};

/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */
struct nvmetcp_spe_conn_offload {
	__le16 reserved;
	__le16 conn_id;
	__le32 fw_cid;
	struct nvmetcp_conn_offload_params nvmetcp;
	struct tcp_offload_params_opt2 tcp;
};
/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */
struct nvmetcp_conn_update_ramrod_params {
	__le16 reserved0;
	__le16 conn_id;
	__le32 reserved1;
	u8 flags;
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_DATA_SHIFT 3
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1
#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7
	u8 reserved3[3];
	__le32 max_seq_size;
	__le32 max_send_pdu_length;
	__le32 max_recv_pdu_length;
	__le32 first_seq_length;
	__le32 reserved4[5];
};
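
Illustrative sketch (not part of the patch): HD_EN/DD_EN carry the header and data digest results of the ICReq/ICResp exchange. A sketch of how the qed core might translate the driver-facing struct qed_nvmetcp_params_update (added further below in qed_nvmetcp_if.h) into this ramrod; the function name and the first_seq_length choice are assumptions.

/* Hypothetical fragment mapping the driver-facing update params onto the
 * ramrod payload.
 */
static void
example_fill_update_ramrod(struct nvmetcp_conn_update_ramrod_params *p,
			   u16 conn_id,
			   struct qed_nvmetcp_params_update *conn_info)
{
	p->conn_id = cpu_to_le16(conn_id);

	SET_FIELD(p->flags, NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN,
		  conn_info->hdr_digest_en ? 1 : 0);
	SET_FIELD(p->flags, NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN,
		  conn_info->data_digest_en ? 1 : 0);

	p->max_seq_size = cpu_to_le32(conn_info->max_io_size);
	p->max_recv_pdu_length = cpu_to_le32(conn_info->max_recv_pdu_length);
	p->max_send_pdu_length = cpu_to_le32(conn_info->max_send_pdu_length);
	p->first_seq_length = cpu_to_le32(conn_info->max_io_size);
}
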
/* NVMeTCP connection termination request */
struct nvmetcp_spe_conn_termination {
	__le16 reserved0;
	__le16 conn_id;
	__le32 reserved1;
	u8 abortive;
	u8 reserved2[7];
	struct regpair reserved3;
	struct regpair reserved4;
};

struct nvmetcp_dif_flags {
	u8 flags;
};

enum nvmetcp_wqe_type {
	NVMETCP_WQE_TYPE_NORMAL,
	NVMETCP_WQE_TYPE_TASK_CLEANUP,
	NVMETCP_WQE_TYPE_MIDDLE_PATH,
	NVMETCP_WQE_TYPE_IC,
	MAX_NVMETCP_WQE_TYPE
};

struct nvmetcp_wqe {
	__le16 task_id;
	u8 flags;
#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */
#define NVMETCP_WQE_WQE_TYPE_SHIFT 0
#define NVMETCP_WQE_NUM_SGES_MASK 0xF
#define NVMETCP_WQE_NUM_SGES_SHIFT 3
#define NVMETCP_WQE_RESPONSE_MASK 0x1
#define NVMETCP_WQE_RESPONSE_SHIFT 7
	struct nvmetcp_dif_flags prot_flags;
	__le32 contlen_cdbsize;
#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF
#define NVMETCP_WQE_CONT_LEN_SHIFT 0
#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF
#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24
};
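
Illustrative sketch (not part of the patch): a sketch of how a host driver might fill an SQ entry for a command PDU, assembling the bit fields with SET_FIELD() on local scratch values before the little-endian store; the function and parameter names are hypothetical.

/* Hypothetical SQ entry builder; only the struct, enum and field masks come
 * from the HSI above.
 */
static void example_build_cmd_wqe(struct nvmetcp_wqe *wqe, u16 task_id,
				  u8 num_sges, u32 cont_len, u8 cmd_hdr_size)
{
	u32 contlen_cdbsize = 0;
	u8 flags = 0;

	wqe->task_id = cpu_to_le16(task_id);

	SET_FIELD(flags, NVMETCP_WQE_WQE_TYPE, NVMETCP_WQE_TYPE_NORMAL);
	SET_FIELD(flags, NVMETCP_WQE_NUM_SGES, num_sges);
	SET_FIELD(flags, NVMETCP_WQE_RESPONSE, 0);
	wqe->flags = flags;

	wqe->prot_flags.flags = 0;	/* no DIF protection in this sketch */

	SET_FIELD(contlen_cdbsize, NVMETCP_WQE_CONT_LEN, cont_len);
	SET_FIELD(contlen_cdbsize, NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD,
		  cmd_hdr_size);
	wqe->contlen_cdbsize = cpu_to_le32(contlen_cdbsize);
}
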
#endif /* __NVMETCP_COMMON__ */

@@ -24,6 +24,50 @@ struct qed_nvmetcp_tid {
	u8 *blocks[MAX_TID_BLOCKS_NVMETCP];
};

struct qed_nvmetcp_id_params {
	u8 mac[ETH_ALEN];
	u32 ip[4];
	u16 port;
};

struct qed_nvmetcp_params_offload {
	/* FW initializations */
	dma_addr_t sq_pbl_addr;
	dma_addr_t nvmetcp_cccid_itid_table_addr;
	u16 nvmetcp_cccid_max_range;
	u8 default_cq;

	/* Networking and TCP stack initializations */
	struct qed_nvmetcp_id_params src;
	struct qed_nvmetcp_id_params dst;
	u32 ka_timeout;
	u32 ka_interval;
	u32 max_rt_time;
	u32 cwnd;
	u16 mss;
	u16 vlan_id;
	bool timestamp_en;
	bool delayed_ack_en;
	bool tcp_keep_alive_en;
	bool ecn_en;
	u8 ip_version;
	u8 ka_max_probe_cnt;
	u8 ttl;
	u8 tos_or_tc;
	u8 rcv_wnd_scale;
};
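
Illustrative sketch (not part of the patch): a sketch of how a vendor NVMe/TCP host driver might populate this struct before calling offload_conn(). Every concrete value below (MSS, window scale, TTL, keep-alive policy) is a placeholder, TCP_IPV4 is assumed to be the enum from tcp_common.h, and the function name is hypothetical.

/* Hypothetical fragment; address/port/timer values would come from the
 * driver's TCP socket and keep-alive policy.
 */
static void example_fill_offload_info(struct qed_nvmetcp_params_offload *c,
				      dma_addr_t sq_pbl, dma_addr_t cccid_tbl,
				      u16 cccid_max)
{
	memset(c, 0, sizeof(*c));

	/* FW initializations */
	c->sq_pbl_addr = sq_pbl;
	c->nvmetcp_cccid_itid_table_addr = cccid_tbl;
	c->nvmetcp_cccid_max_range = cccid_max;
	c->default_cq = 0;

	/* Networking and TCP stack initializations (placeholder values) */
	c->ip_version = TCP_IPV4;	/* enum tcp_ip_version (assumption) */
	c->dst.port = 4420;		/* NVMe/TCP well-known port */
	c->mss = 1460;
	c->cwnd = 4 * c->mss;
	c->rcv_wnd_scale = 4;
	c->ttl = 64;
	c->timestamp_en = true;
	c->delayed_ack_en = true;
	c->tcp_keep_alive_en = true;
	c->ka_timeout = 7200000;	/* placeholder keep-alive policy */
	c->ka_interval = 10000;
	c->ka_max_probe_cnt = 10;
	c->max_rt_time = 1200;
	/* src/dst MAC and IP addresses are copied from the resolved route */
}
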
struct qed_nvmetcp_params_update {
	u32 max_io_size;
	u32 max_recv_pdu_length;
	u32 max_send_pdu_length;

	/* Placeholder: pfv, cpda, hpda */
	bool hdr_digest_en;
	bool data_digest_en;
};
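
Illustrative sketch (not part of the patch): these fields carry the outcome of the ICReq/ICResp exchange back to the FW through the update_conn op declared below. A minimal sketch with placeholder sizes; the function name is hypothetical.

/* Hypothetical fragment filling the update params after IC negotiation. */
static void example_fill_update_info(struct qed_nvmetcp_params_update *u,
				     bool hdgst, bool ddgst, u32 max_io_size)
{
	u->hdr_digest_en = hdgst;	/* HDGST negotiated in ICResp */
	u->data_digest_en = ddgst;	/* DDGST negotiated in ICResp */
	u->max_io_size = max_io_size;	/* e.g. derived from MAXH2CDATA */
	u->max_recv_pdu_length = 8192;	/* placeholder PDU limits */
	u->max_send_pdu_length = 8192;
}
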
struct qed_nvmetcp_cb_ops {
	struct qed_common_cb_ops common;
};
@@ -47,6 +91,38 @@ struct qed_nvmetcp_cb_ops {
 * @stop: nvmetcp in FW
 *	@param cdev
 *	return 0 on success, otherwise error value.
 * @acquire_conn: acquire a new nvmetcp connection
 *	@param cdev
 *	@param handle - qed will fill handle that should be
 *		used henceforth as identifier of the
 *		connection.
 *	@param p_doorbell - qed will fill the address of the
 *		doorbell.
 *	@return 0 on success, otherwise error value.
 * @release_conn: release a previously acquired nvmetcp connection
 *	@param cdev
 *	@param handle - the connection handle.
 *	@return 0 on success, otherwise error value.
 * @offload_conn: configures an offloaded connection
 *	@param cdev
 *	@param handle - the connection handle.
 *	@param conn_info - the configuration to use for the
 *		offload.
 *	@return 0 on success, otherwise error value.
 * @update_conn: updates an offloaded connection
 *	@param cdev
 *	@param handle - the connection handle.
 *	@param conn_info - the configuration to use for the
 *		update.
 *	@return 0 on success, otherwise error value.
 * @destroy_conn: stops an offloaded connection
 *	@param cdev
 *	@param handle - the connection handle.
 *	@return 0 on success, otherwise error value.
 * @clear_sq: clear all tasks in the SQ
 *	@param cdev
 *	@param handle - the connection handle.
 *	@return 0 on success, otherwise error value.
 */
struct qed_nvmetcp_ops {
	const struct qed_common_ops *common;
@@ -64,6 +140,24 @@ struct qed_nvmetcp_ops {
		     void *event_context, nvmetcp_event_cb_t async_event_cb);

	int (*stop)(struct qed_dev *cdev);

	int (*acquire_conn)(struct qed_dev *cdev,
			    u32 *handle,
			    u32 *fw_cid, void __iomem **p_doorbell);

	int (*release_conn)(struct qed_dev *cdev, u32 handle);

	int (*offload_conn)(struct qed_dev *cdev,
			    u32 handle,
			    struct qed_nvmetcp_params_offload *conn_info);

	int (*update_conn)(struct qed_dev *cdev,
			   u32 handle,
			   struct qed_nvmetcp_params_update *conn_info);

	int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);

	int (*clear_sq)(struct qed_dev *cdev, u32 handle);
};

const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void);
...
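
Illustrative sketch (not part of the patch): taken together, the ops map onto the connection-level flow described in the commit message. A sketch of the call sequence a vendor NVMe/TCP host driver might run, with error handling trimmed and the conn_info structs assumed to be filled as in the earlier sketches; the function name is hypothetical.

/* Hypothetical connection bring-up/tear-down sequence on top of the ops. */
static int example_conn_bringup(struct qed_dev *cdev,
				const struct qed_nvmetcp_ops *ops,
				struct qed_nvmetcp_params_offload *offload_info,
				struct qed_nvmetcp_params_update *update_info)
{
	void __iomem *doorbell;
	u32 handle, fw_cid;
	int rc;

	rc = ops->acquire_conn(cdev, &handle, &fw_cid, &doorbell);
	if (rc)
		return rc;

	rc = ops->offload_conn(cdev, handle, offload_info);	/* TCP offload */
	if (rc)
		goto release;

	/* ... wait for NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, then
	 * exchange ICReq/ICResp over the offloaded connection ...
	 */

	rc = ops->update_conn(cdev, handle, update_info);	/* apply ICResp */
	if (rc)
		goto destroy;

	return 0;	/* connection is live; IOs are posted via the doorbell */

destroy:
	ops->clear_sq(cdev, handle);		/* flush outstanding IOs */
	ops->destroy_conn(cdev, handle, 1);	/* abortive termination */
	/* ... wait for NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE ... */
release:
	ops->release_conn(cdev, handle);
	return rc;
}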