Commit be086e7c authored by Yuval Mintz, committed by David S. Miller

qed*: Utilize Firmware 8.15.3.0

This patch moves the qed* drivers to the newer firmware -
it fixes several firmware bugs, mostly related [but not limited to]
init/deinit issues in various offloaded protocols.

It also introduces a major 4-Cached SGE change in firmware, which can be
seen in the storage drivers' changes.

In addition, this firmware is required for supporting the new QL41xxx
series of adapters; while this patch doesn't add the actual support,
the firmware contains the necessary initialization & firmware logic to
operate such adapters [actual support will be added later on].

Changes from Previous versions:
-------------------------------
 - V2 - fix kbuild-test robot warnings
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6a019c5c
@@ -587,9 +587,8 @@ void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
 #define EVENT_TYPE_CQ		1
 #define EVENT_TYPE_QP		2
 	struct qedr_dev *dev = (struct qedr_dev *)context;
-	union event_ring_data *data = fw_handle;
-	u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
-			    data->roce_handle.lo;
+	struct regpair *async_handle = (struct regpair *)fw_handle;
+	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
 	u8 event_type = EVENT_TYPE_NOT_DEFINED;
 	struct ib_event event;
 	struct ib_cq *ibcq;
...
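The new variant reads the event handle through a generic regpair instead of the RoCE-specific union member. As a minimal userspace sketch (types simplified from the kernel's __le32 pair), the hi/lo halves combine like this:

#include <stdint.h>

/* Simplified stand-in for the kernel's struct regpair (lo/hi 32-bit halves). */
struct regpair {
        uint32_t lo;
        uint32_t hi;
};

/* Same arithmetic as roce_handle64 above: shift the high half up 32 bits
 * and add the low half.
 */
static uint64_t regpair_to_u64(const struct regpair *p)
{
        return ((uint64_t)p->hi << 32) + p->lo;
}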
@@ -38,7 +38,8 @@
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_roce_if.h>
 #include <linux/qed/qede_roce.h>
-#include "qedr_hsi.h"
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
 
 #define QEDR_MODULE_VERSION	"8.10.10.0"
 #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
...
@@ -43,14 +43,11 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
 
-#include "qedr_hsi.h"
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_roce_if.h>
 #include "qedr.h"
-#include "qedr_hsi.h"
 #include "verbs.h"
 #include <rdma/qedr-abi.h>
-#include "qedr_hsi.h"
 #include "qedr_cm.h"
 
 void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
...
/* QLogic qedr NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and /or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __QED_HSI_ROCE__
#define __QED_HSI_ROCE__

#include <linux/qed/common_hsi.h>
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"

/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
        ROCE_ASYNC_EVENT_NONE = 0,
        ROCE_ASYNC_EVENT_COMM_EST = 1,
        ROCE_ASYNC_EVENT_SQ_DRAINED,
        ROCE_ASYNC_EVENT_SRQ_LIMIT,
        ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
        ROCE_ASYNC_EVENT_CQ_ERR,
        ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
        ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
        ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
        ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
        ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
        ROCE_ASYNC_EVENT_SRQ_EMPTY,
        MAX_ROCE_ASYNC_EVENTS_TYPE
};

#endif /* __QED_HSI_ROCE__ */
@@ -43,7 +43,8 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
 
-#include "qedr_hsi.h"
+#include <linux/qed/common_hsi.h>
+#include "qedr_hsi_rdma.h"
 #include <linux/qed/qed_if.h>
 #include "qedr.h"
 #include "verbs.h"
...
@@ -51,7 +51,7 @@
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+#define DRV_MODULE_VERSION "8.10.10.21"
 
 #define MAX_HWFNS_PER_DEVICE	(4)
 #define NAME_SIZE 16
...
@@ -71,8 +71,7 @@
 #define TM_ALIGN        BIT(TM_SHIFT)
 #define TM_ELEM_SIZE    4
 
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE	(IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE	4
 
 #define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -1126,7 +1125,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
 	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
 	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
 	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
 
-	/* default ILT page size for all clients is 32K */
+	/* default ILT page size for all clients is 64K */
 	for (i = 0; i < ILT_CLI_MAX; i++)
 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
...
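The ILT page-size field is a power-of-two exponent offset by 12, so the old non-RDMA default of 3 encoded 32K pages while the new unconditional value of 4 encodes 64K, matching the updated comment. A minimal check of the arithmetic:

#include <stdio.h>

/* Same formula as ILT_PAGE_IN_BYTES() in the hunk above. */
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))

int main(void)
{
        printf("%u\n", ILT_PAGE_IN_BYTES(3)); /* 32768 -> the old 32K default */
        printf("%u\n", ILT_PAGE_IN_BYTES(4)); /* 65536 -> the new 64K default */
        return 0;
}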
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
 {
 	u32 qm_line_crd;
 
-	/* In A0 - Limit the size of pbf queue so that only 511 commands with
-	 * the minimum size of 4 (FCoE minimum size)
-	 */
-	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
-	if (is_bb_a0)
-		cmdq_lines = min_t(u32, cmdq_lines, 1022);
 	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
 			 (u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
 	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
 	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
 			    QM_PF_QUEUE_GROUP_SIZE;
-	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
 	u16 i, pq_id, pq_group;
 
 	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
 	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
-	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
-	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
 	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
 	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
 	u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
 		bool is_vf_pq = (i >= p_params->num_pf_pqs);
 		struct qm_rf_pq_map tx_pq_map;
 
+		bool rl_valid = p_params->pq_params[i].rl_valid &&
+				(p_params->pq_params[i].vport_id <
+				 MAX_QM_GLOBAL_RLS);
+
 		/* update first Tx PQ of VPORT/TC */
 		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
 				    p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
 				     (p_params->pf_id <<
 				      QM_WFQ_VP_PQ_PF_SHIFT));
 		}
+
+		if (p_params->pq_params[i].rl_valid && !rl_valid)
+			DP_NOTICE(p_hwfn,
+				  "Invalid VPORT ID for rate limiter configuration");
 		/* fill PQ map entry */
 		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
-			  p_params->pq_params[i].rl_valid ? 1 : 0);
+		SET_FIELD(tx_pq_map.reg,
+			  QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-			  p_params->pq_params[i].rl_valid ?
+			  rl_valid ?
 			  p_params->pq_params[i].vport_id : 0);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
 			/* if PQ is associated with a VF, add indication
 			 * to PQ VF mask
 			 */
-			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
-				(1 << (pq_id % tx_pq_vf_mask_width));
+			tx_pq_vf_mask[pq_id /
+				      QM_PF_QUEUE_GROUP_SIZE] |=
+			    BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
 			mem_addr_4kb += vport_pq_mem_4kb;
 		} else {
 			mem_addr_4kb += pq_mem_4kb;
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	if (p_params->pf_id < MAX_NUM_PFS_BB)
 		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
 	else
-		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
-				 (p_params->pf_id % MAX_NUM_PFS_BB);
+		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
 
 	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			     QM_WFQ_CRD_REG_SIGN_BIT);
 	}
 
-	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
-		     inc_val);
 	STORE_RT_REG(p_hwfn,
 		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
 		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+		     inc_val);
 	return 0;
 }
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 {
 	u8 i, vport_id;
 
+	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+		DP_NOTICE(p_hwfn,
+			  "Invalid VPORT ID for rate limiter configuration");
+		return -1;
+	}
+
 	/* go over all PF VPORTs */
 	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
 		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 {
 	u32 inc_val = QM_RL_INC_VAL(vport_rl);
 
+	if (vport_id >= MAX_QM_GLOBAL_RLS) {
+		DP_NOTICE(p_hwfn,
+			  "Invalid VPORT ID for rate limiter configuration");
+		return -1;
+	}
+
 	if (inc_val > QM_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
 		return -1;
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 		 eth_geneve_enable ? 1 : 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-	/* comp ver */
-	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
-	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
 	/* EDPM with geneve tunnel not supported in BB_B0 */
 	if (QED_IS_BB_B0(p_hwfn->cdev))
 		return;
...
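The recurring check added in the hunks above gates rate-limiter programming on the vport id fitting the global rate-limiter table. A standalone sketch of the pattern, with an illustrative bound (the value of MAX_QM_GLOBAL_RLS here is made up, not the hardware constant):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_QM_GLOBAL_RLS 256 /* illustrative bound, not the real HW value */

/* Mirrors the rl_valid computation above: a rate limiter is honoured only
 * when it was requested AND the vport id indexes a real global rate limiter.
 */
static bool rl_is_valid(bool requested, uint16_t vport_id)
{
        return requested && vport_id < MAX_QM_GLOBAL_RLS;
}

int main(void)
{
        if (!rl_is_valid(true, 300))
                printf("Invalid VPORT ID for rate limiter configuration\n");
        return 0;
}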
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
 	}
 
 	/* First Dword contains metadata and should be skipped */
-	buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+	buf_hdr = (struct bin_buffer_hdr *)data;
 
 	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
 	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
...
@@ -594,7 +594,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
 	u8 bd_flags = 0;
 
 	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
 
 	return bd_flags;
 }
@@ -755,8 +755,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
 				   p_buffer->placement_offset;
 		parse_flags = p_buffer->parse_flags;
 		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
 
 		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
 					       p_buffer->vlan, bd_flags,
@@ -1588,7 +1588,8 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
 	p_tx->cur_send_frag_num++;
 }
 
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 				 struct qed_ll2_info *p_ll2,
 				 struct qed_ll2_tx_packet *p_curp,
 				 u8 num_of_bds,
@@ -1596,25 +1597,25 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 				 u16 vlan,
 				 u8 bd_flags,
 				 u16 l4_hdr_offset_w,
-				 enum core_roce_flavor_type type,
+				 enum core_roce_flavor_type roce_flavor,
 				 dma_addr_t first_frag,
 				 u16 first_frag_len)
 {
 	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
 	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
 	struct core_tx_bd *start_bd = NULL;
-	u16 frag_idx;
+	u16 bd_data = 0, frag_idx;
 
 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
 	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
 		  cpu_to_le16(l4_hdr_offset_w));
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
-	start_bd->bd_flags.as_bitfield = bd_flags;
-	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
-	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
-	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
-	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+	bd_data |= bd_flags;
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
 	start_bd->nbytes = cpu_to_le16(first_frag_len);
@@ -1639,9 +1640,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
 
 		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
-		(*p_bd)->bd_flags.as_bitfield = 0;
+		(*p_bd)->bd_data.as_bitfield = 0;
 		(*p_bd)->bitfield1 = 0;
-		(*p_bd)->bitfield0 = 0;
 		p_curp->bds_set[frag_idx].tx_frag = 0;
 		p_curp->bds_set[frag_idx].frag_len = 0;
 	}
@@ -2238,11 +2238,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
 	/* Request HW to calculate IP csum */
 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
 	if (skb_vlan_tag_present(skb)) {
 		vlan = skb_vlan_tag_get(skb);
-		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
 	}
 
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
...
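The LL2 changes collapse the old bd_flags/bitfield0 pair into a single 16-bit bd_data word built with SET_FIELD() and stored with one cpu_to_le16(). A simplified userspace sketch of the mask/shift idiom - the demo field masks are invented, and the real kernel macro also clears the field before OR-ing:

#include <stdint.h>
#include <stdio.h>

/* Simplified SET_FIELD(): each field name expands to _MASK and _SHIFT
 * macros and the value is OR-ed into place.
 */
#define SET_FIELD(value, name, flag) \
        ((value) |= ((flag) & name##_MASK) << name##_SHIFT)

#define DEMO_BD_START_BD_MASK	0x1
#define DEMO_BD_START_BD_SHIFT	0
#define DEMO_BD_NBDS_MASK	0xF
#define DEMO_BD_NBDS_SHIFT	1

int main(void)
{
        uint16_t bd_data = 0;

        SET_FIELD(bd_data, DEMO_BD_START_BD, 0x1);
        SET_FIELD(bd_data, DEMO_BD_NBDS, 3);
        printf("0x%04x\n", bd_data); /* 0x0007: start bit | (nbds=3) << 1 */
        return 0;
}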
@@ -356,6 +356,10 @@
 	0x238804UL
 #define RDIF_REG_STOP_ON_ERROR \
 	0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+	0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+	64
 #define SRC_REG_SOFT_RST \
 	0x23874cUL
 #define TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@
 	0x1700004UL
 #define TDIF_REG_STOP_ON_ERROR \
 	0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+	0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+	64
 #define UCM_REG_INIT \
 	0x1280000UL
 #define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@
 	0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
 	0x1901538UL
+#define NWS_REG_DBG_SELECT \
+	0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+	0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+	0x700130UL
+#define NWS_REG_DBG_FORCE_VALID \
+	0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME \
+	0x700138UL
+#define MS_REG_DBG_SELECT \
+	0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+	0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+	0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+	0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+	0x6a0238UL
 #define PCIE_REG_DBG_COMMON_SELECT \
 	0x054398UL
 #define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@
 	0x000b48UL
 #define RSS_REG_RSS_RAM_DATA \
 	0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+	4
 #define MISC_REG_BLOCK_256B_EN \
 	0x008c14UL
 #define NWS_REG_NWS_CMU \
...
@@ -82,6 +82,7 @@ struct qed_rdma_info {
 	struct qed_bmap qp_map;
 	struct qed_bmap srq_map;
 	struct qed_bmap cid_map;
+	struct qed_bmap real_cid_map;
 	struct qed_bmap dpi_map;
 	struct qed_bmap toggle_bits;
 	struct qed_rdma_events events;
@@ -92,6 +93,7 @@ struct qed_rdma_info {
 	u32 num_qps;
 	u32 num_mrs;
 	u16 queue_zone_base;
+	u16 max_queue_zones;
 	enum protocol_type proto;
 };
@@ -153,6 +155,7 @@ struct qed_rdma_qp {
 	dma_addr_t irq_phys_addr;
 	u8 irq_num_pages;
 	bool resp_offloaded;
+	u32 cq_prod;
 
 	u8 remote_mac_addr[6];
 	u8 local_mac_addr[6];
@@ -163,8 +166,8 @@ struct qed_rdma_qp {
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-			  struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+			  u8 fw_event_code, union rdma_eqe_data *rdma_data);
 void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
 				     u8 connection_handle,
 				     void *cookie,
@@ -187,7 +190,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
 				     u16 src_mac_addr_lo, bool b_last_packet);
 #else
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+					u8 fw_event_code,
+					union rdma_eqe_data *rdma_data) {}
 static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
 						   u8 connection_handle,
 						   void *cookie,
...
@@ -296,9 +296,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 			   struct event_ring_entry *p_eqe)
 {
 	switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
 	case PROTOCOLID_ROCE:
-		qed_async_roce_event(p_hwfn, p_eqe);
+		qed_roce_async_event(p_hwfn, p_eqe->opcode,
+				     &p_eqe->data.rdma_data);
 		return 0;
+#endif
 	case PROTOCOLID_COMMON:
 		return qed_sriov_eqe_event(p_hwfn,
 					   p_eqe->opcode,
@@ -306,14 +309,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 	case PROTOCOLID_ISCSI:
 		if (!IS_ENABLED(CONFIG_QED_ISCSI))
 			return -EINVAL;
-		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
-			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
-			qed_ooo_release_connection_isles(p_hwfn,
-							 p_hwfn->p_ooo_info,
-							 cid);
-			return 0;
-		}
 
 		if (p_hwfn->p_iscsi_info->event_cb) {
 			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
...
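Note the two config-gating styles in the hunk above: the RoCE case is now compiled out entirely when CONFIG_QED_RDMA is off (so its rdma_eqe_data references never reach the linker), while the iSCSI case stays compiled and bails out at run time. A userspace sketch of the contrast, with invented CONFIG_DEMO_* switches and a simplified IS_ENABLED():

#include <stdio.h>

#define CONFIG_DEMO_RDMA 1      /* stand-in for CONFIG_QED_RDMA */
#define CONFIG_DEMO_ISCSI 0     /* stand-in for CONFIG_QED_ISCSI */
#define IS_ENABLED(x) (x)       /* the kernel macro is more elaborate */

static int handle_event(int protocol_id)
{
        switch (protocol_id) {
#if CONFIG_DEMO_RDMA
        case 1: /* "RoCE": removed by the preprocessor when the option is off */
                return 0;
#endif
        case 2: /* "iSCSI": always compiled, rejected at run time if disabled */
                if (!IS_ENABLED(CONFIG_DEMO_ISCSI))
                        return -1;
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        printf("%d\n", handle_event(2)); /* -1: the iSCSI demo option is off */
        return 0;
}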
@@ -50,7 +50,7 @@
 #define QEDE_MAJOR_VERSION		8
 #define QEDE_MINOR_VERSION		10
 #define QEDE_REVISION_VERSION		10
-#define QEDE_ENGINEERING_VERSION	20
+#define QEDE_ENGINEERING_VERSION	21
 #define DRV_MODULE_VERSION	__stringify(QEDE_MAJOR_VERSION) "."	\
 		__stringify(QEDE_MINOR_VERSION) "."			\
 		__stringify(QEDE_REVISION_VERSION) "."			\
...
 obj-$(CONFIG_QEDF) := qedf.o
 qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
-	 qedf_attr.o qedf_els.o
+	 qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
 
 qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
/* QLogic FCoE Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include "drv_fcoe_fw_funcs.h"
#include "drv_scsi_fw_funcs.h"

#define FCOE_RX_ID ((u32)0x0000FFFF)

static inline void init_common_sqe(struct fcoe_task_params *task_params,
				   enum fcoe_sqe_request_type request_type)
{
	memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
	SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
		  request_type);
	task_params->sqe->task_id = task_params->itid;
}
int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
				struct scsi_sgl_task_params *sgl_task_params,
				struct regpair sense_data_buffer_phys_addr,
				u32 task_retry_id,
				u8 fcp_cmd_payload[32])
{
	struct fcoe_task_context *ctx = task_params->context;
	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
	u32 io_size, val;
	bool slow_sgl;

	memset(ctx, 0, sizeof(*(ctx)));
	slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
				    sgl_task_params->small_mid_sge);
	io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
		   task_params->tx_io_size : task_params->rx_io_size);

	/* Ystorm ctx */
	y_st_ctx = &ctx->ystorm_st_context;
	y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
	y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
	y_st_ctx->task_type = task_params->task_type;
	memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
	       fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));

	/* Tstorm ctx */
	t_st_ctx = &ctx->tstorm_st_context;
	t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
					FCOE_TASK_DEV_TYPE_TAPE :
					FCOE_TASK_DEV_TYPE_DISK);
	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
	val = cpu_to_le32(task_params->cq_rss_number);
	t_st_ctx->read_only.glbl_q_num = val;
	t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
	t_st_ctx->read_only.task_type = task_params->task_type;
	SET_FIELD(t_st_ctx->read_write.flags,
		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
	t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);

	/* Ustorm ctx */
	u_ag_ctx = &ctx->ustorm_ag_context;
	u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);

	/* Mstorm buffer for sense/rsp data placement */
	m_st_ctx = &ctx->mstorm_st_context;
	val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
	m_st_ctx->rsp_buf_addr.hi = val;
	val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
	m_st_ctx->rsp_buf_addr.lo = val;

	if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
		/* Ystorm ctx */
		y_st_ctx->expect_first_xfer = 1;

		/* Set the amount of super SGEs. Can be up to 4. */
		SET_FIELD(y_st_ctx->sgl_mode,
			  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
		init_scsi_sgl_context(&y_st_ctx->sgl_params,
				      &y_st_ctx->data_desc,
				      sgl_task_params);

		/* Mstorm ctx */
		SET_FIELD(m_st_ctx->flags,
			  MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
	} else {
		/* Tstorm ctx */
		SET_FIELD(t_st_ctx->read_write.flags,
			  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));

		/* Mstorm ctx */
		m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
		init_scsi_sgl_context(&m_st_ctx->sgl_params,
				      &m_st_ctx->data_desc,
				      sgl_task_params);
	}

	init_common_sqe(task_params, SEND_FCOE_CMD);
	return 0;
}
int init_initiator_midpath_unsolicited_fcoe_task(
	struct fcoe_task_params *task_params,
	struct fcoe_tx_mid_path_params *mid_path_fc_header,
	struct scsi_sgl_task_params *tx_sgl_task_params,
	struct scsi_sgl_task_params *rx_sgl_task_params,
	u8 fw_to_place_fc_header)
{
	struct fcoe_task_context *ctx = task_params->context;
	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
	u32 val;

	memset(ctx, 0, sizeof(*(ctx)));

	/* Init Ystorm */
	y_st_ctx = &ctx->ystorm_st_context;
	init_scsi_sgl_context(&y_st_ctx->sgl_params,
			      &y_st_ctx->data_desc,
			      tx_sgl_task_params);
	SET_FIELD(y_st_ctx->sgl_mode,
		  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
	y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
	y_st_ctx->task_type = task_params->task_type;
	memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
	       mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));

	/* Init Mstorm */
	m_st_ctx = &ctx->mstorm_st_context;
	init_scsi_sgl_context(&m_st_ctx->sgl_params,
			      &m_st_ctx->data_desc,
			      rx_sgl_task_params);
	SET_FIELD(m_st_ctx->flags,
		  MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
		  fw_to_place_fc_header);
	m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);

	/* Init Tstorm */
	t_st_ctx = &ctx->tstorm_st_context;
	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
	val = cpu_to_le32(task_params->cq_rss_number);
	t_st_ctx->read_only.glbl_q_num = val;
	t_st_ctx->read_only.task_type = task_params->task_type;
	SET_FIELD(t_st_ctx->read_write.flags,
		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
	t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);

	/* Init Ustorm */
	u_ag_ctx = &ctx->ustorm_ag_context;
	u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);

	/* Init SQE */
	init_common_sqe(task_params, SEND_FCOE_MIDPATH);
	task_params->sqe->additional_info_union.burst_length =
	    tx_sgl_task_params->total_buffer_size;
	SET_FIELD(task_params->sqe->flags,
		  FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
	SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
		  SCSI_FAST_SGL);

	return 0;
}
int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
{
	init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
	return 0;
}

int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
{
	init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
	return 0;
}

int init_initiator_sequence_recovery_fcoe_task(
	struct fcoe_task_params *task_params, u32 off)
{
	init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
	task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
	return 0;
}
/* QLogic FCoE Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#ifndef _FCOE_FW_FUNCS_H
#define _FCOE_FW_FUNCS_H
#include "drv_scsi_fw_funcs.h"
#include "qedf_hsi.h"
#include <linux/qed/qed_if.h>

struct fcoe_task_params {
	/* Output parameter [set/filled by the HSI function] */
	struct fcoe_task_context *context;

	/* Output parameter [set/filled by the HSI function] */
	struct fcoe_wqe *sqe;
	enum fcoe_task_type task_type;
	u32 tx_io_size; /* in bytes */
	u32 rx_io_size; /* in bytes */
	u32 conn_cid;
	u16 itid;
	u8 cq_rss_number;

	/* Whether it's Tape device or not (0=Disk, 1=Tape) */
	u8 is_tape_device;
};
/**
 * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for
 * read/write task types and init fcoe_sqe
 *
 * @param task_params - Pointer to task parameters struct
 * @param sgl_task_params - Pointer to SGL task params
 * @param sense_data_buffer_phys_addr - Pointer to sense data buffer
 * @param task_retry_id - retry identification - Used only for Tape device
 * @param fcp_cmd_payload - FCP CMD Payload
 */
int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
				struct scsi_sgl_task_params *sgl_task_params,
				struct regpair sense_data_buffer_phys_addr,
				u32 task_retry_id,
				u8 fcp_cmd_payload[32]);
/**
 * @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes FCoE task
 * context for midpath/unsolicited task types and init fcoe_sqe
 *
 * @param task_params - Pointer to task parameters struct
 * @param mid_path_fc_header - FC header
 * @param tx_sgl_task_params - Pointer to Tx SGL task params
 * @param rx_sgl_task_params - Pointer to Rx SGL task params
 * @param fw_to_place_fc_header - Indication if the FW will place the FC header
 * in addition to the data that arrives.
 */
int init_initiator_midpath_unsolicited_fcoe_task(
	struct fcoe_task_params *task_params,
	struct fcoe_tx_mid_path_params *mid_path_fc_header,
	struct scsi_sgl_task_params *tx_sgl_task_params,
	struct scsi_sgl_task_params *rx_sgl_task_params,
	u8 fw_to_place_fc_header);
/**
 * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for
 * abort task types and init fcoe_sqe
 *
 * @param task_params - Pointer to task parameters struct
 */
int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);

/**
 * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for
 * cleanup task types and init fcoe_sqe
 *
 * @param task_params - Pointer to task parameters struct
 */
int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
/**
 * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task
 * context for sequence recovery task types and init fcoe_sqe
 *
 * @param task_params - Pointer to task parameters struct
 * @param desired_offset - The desired offset the task will be re-sent from
 */
int init_initiator_sequence_recovery_fcoe_task(
	struct fcoe_task_params *task_params,
	u32 desired_offset);
#endif
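For orientation, a hypothetical caller of this API would fill a fcoe_task_params with its task context and SQE slot and delegate to the init routine. The demo_send_abort() helper and its arguments below are invented for illustration; only the fcoe_task_params fields and init_initiator_abort_fcoe_task() come from the header above (the real wiring lives in the qedf I/O paths):

#include "drv_fcoe_fw_funcs.h"

/* Hypothetical caller sketch; not part of the driver. */
static void demo_send_abort(struct fcoe_task_params *task_params,
			    struct fcoe_task_context *ctx,
			    struct fcoe_wqe *sqe, u16 xid, u32 cid)
{
	task_params->context = ctx;	/* task context to be filled */
	task_params->sqe = sqe;		/* SQE slot to be filled */
	task_params->itid = xid;	/* exchange/task id */
	task_params->conn_cid = cid;	/* connection id */
	init_initiator_abort_fcoe_task(task_params);
}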
/* QLogic FCoE Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include "drv_scsi_fw_funcs.h"

#define SCSI_NUM_SGES_IN_CACHE 0x4

bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
{
	return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
}
void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
			   struct scsi_cached_sges *ctx_data_desc,
			   struct scsi_sgl_task_params *sgl_task_params)
{
	/* no need to check for sgl_task_params->sgl validity */
	u8 num_sges_to_init = sgl_task_params->num_sges >
			      SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
			      sgl_task_params->num_sges;
	u8 sge_index;
	u32 val;

	val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
	ctx_sgl_params->sgl_addr.lo = val;
	val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
	ctx_sgl_params->sgl_addr.hi = val;
	val = cpu_to_le32(sgl_task_params->total_buffer_size);
	ctx_sgl_params->sgl_total_length = val;
	ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);

	for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
		ctx_data_desc->sge[sge_index].sge_addr.lo = val;
		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
		ctx_data_desc->sge[sge_index].sge_addr.hi = val;
		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
		ctx_data_desc->sge[sge_index].sge_len = val;
	}
}
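This is where the "4-Cached SGE" change from the commit message lands on the SCSI side: at most SCSI_NUM_SGES_IN_CACHE (4) SGEs are copied into the cached descriptor area of the task context, presumably leaving the firmware to fetch any remaining SGEs from the host SGL. The clamp in isolation:

#include <stdint.h>

#define SCSI_NUM_SGES_IN_CACHE 0x4

/* Standalone restatement of the num_sges_to_init clamp above: never copy
 * more than 4 SGEs into the cached descriptor area.
 */
static uint8_t num_cached_sges(uint16_t num_sges)
{
        return num_sges > SCSI_NUM_SGES_IN_CACHE ?
               SCSI_NUM_SGES_IN_CACHE : (uint8_t)num_sges;
}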
/* QLogic FCoE Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#ifndef _SCSI_FW_FUNCS_H
#define _SCSI_FW_FUNCS_H
#include <linux/qed/common_hsi.h>
#include <linux/qed/storage_common.h>
#include <linux/qed/fcoe_common.h>

struct scsi_sgl_task_params {
	struct scsi_sge *sgl;
	struct regpair sgl_phys_addr;
	u32 total_buffer_size;
	u16 num_sges;

	/* true if SGL contains a small (< 4KB) SGE in middle (not 1st or last)
	 * -> relevant for tx only
	 */
	bool small_mid_sge;
};
struct scsi_dif_task_params {
	u32 initial_ref_tag;
	bool initial_ref_tag_is_valid;
	u16 application_tag;
	u16 application_tag_mask;
	u16 dif_block_size_log;
	bool dif_on_network;
	bool dif_on_host;
	u8 host_guard_type;
	u8 protection_type;
	u8 ref_tag_mask;
	bool crc_seed;

	/* Enable Connection error upon DIF error (segments with DIF errors are
	 * dropped)
	 */
	bool tx_dif_conn_err_en;
	bool ignore_app_tag;
	bool keep_ref_tag_const;
	bool validate_guard;
	bool validate_app_tag;
	bool validate_ref_tag;
	bool forward_guard;
	bool forward_app_tag;
	bool forward_ref_tag;
	bool forward_app_tag_with_mask;
	bool forward_ref_tag_with_mask;
};

struct scsi_initiator_cmd_params {
	/* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
	 * pointer to the CDB buffer SGE
	 */
	struct scsi_sge extended_cdb_sge;

	/* Physical address of sense data buffer for sense data - 256B buffer */
	struct regpair sense_data_buffer_phys_addr;
};
/**
 * @brief scsi_is_slow_sgl - checks for slow SGL
 *
 * @param num_sges - number of sges in SGL
 * @param small_mid_sge - True if the SGL contains an SGE which is smaller than
 * 4KB and is not the 1st or last SGE in the SGL
 */
bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);

/**
 * @brief init_scsi_sgl_context - initializes SGL task context
 *
 * @param sgl_params - SGL context parameters to initialize (output parameter)
 * @param data_desc - context struct containing SGEs array to set (output
 * parameter)
 * @param sgl_task_params - SGL parameters (input)
 */
void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
			   struct scsi_cached_sges *ctx_data_desc,
			   struct scsi_sgl_task_params *sgl_task_params);
#endif
@@ -25,6 +25,9 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
 	uint16_t xid;
 	uint32_t start_time = jiffies / HZ;
 	uint32_t current_time;
+	struct fcoe_wqe *sqe;
+	unsigned long flags;
+	u16 sqe_idx;
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
@@ -113,20 +116,25 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
 	/* Obtain exchange id */
 	xid = els_req->xid;
 
+	spin_lock_irqsave(&fcport->rport_lock, flags);
+
+	sqe_idx = qedf_get_sqe_idx(fcport);
+	sqe = &fcport->sq[sqe_idx];
+	memset(sqe, 0, sizeof(struct fcoe_wqe));
+
 	/* Initialize task context for this IO request */
 	task = qedf_get_task_mem(&qedf->tasks, xid);
-	qedf_init_mp_task(els_req, task);
+	qedf_init_mp_task(els_req, task, sqe);
 
 	/* Put timer on original I/O request */
 	if (timer_msec)
 		qedf_cmd_timer_set(qedf, els_req, timer_msec);
 
-	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
-
 	/* Ring doorbell */
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
 		   "req\n");
 	qedf_ring_doorbell(fcport);
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 els_err:
 	return rc;
 }
@@ -604,6 +612,8 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
 	struct qedf_rport *fcport;
 	unsigned long flags;
 	struct qedf_els_cb_arg *cb_arg;
+	struct fcoe_wqe *sqe;
+	u16 sqe_idx;
 
 	fcport = orig_io_req->fcport;
@@ -631,8 +641,13 @@ static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
 
 	spin_lock_irqsave(&fcport->rport_lock, flags);
 
-	qedf_add_to_sq(fcport, orig_io_req->xid, 0,
-		       FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
+	sqe_idx = qedf_get_sqe_idx(fcport);
+	sqe = &fcport->sq[sqe_idx];
+	memset(sqe, 0, sizeof(struct fcoe_wqe));
+	orig_io_req->task_params->sqe = sqe;
+
+	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
+						   offset);
 	qedf_ring_doorbell(fcport);
 
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
...
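Both qedf hunks converge on the same submission pattern: claim and zero an SQE slot under the rport lock, let the FW-funcs helper fill it, and ring the doorbell before dropping the lock so the producer index and doorbell stay ordered. A userspace model of that pattern, with invented demo_* names:

#include <pthread.h>
#include <stdint.h>
#include <string.h>

struct demo_wqe {
        uint32_t flags;
        uint16_t task_id;
};

struct demo_port {
        pthread_mutex_t lock;   /* stands in for fcport->rport_lock */
        struct demo_wqe sq[64]; /* stands in for fcport->sq */
        uint16_t prod_idx;      /* stands in for qedf_get_sqe_idx() state */
};

static void demo_submit(struct demo_port *p, uint16_t task_id)
{
        struct demo_wqe *sqe;

        pthread_mutex_lock(&p->lock);
        sqe = &p->sq[p->prod_idx++ % 64]; /* claim the next SQE slot */
        memset(sqe, 0, sizeof(*sqe));     /* zero it before filling */
        sqe->task_id = task_id;           /* helper would fill the rest */
        /* doorbell write would go here, still under the lock */
        pthread_mutex_unlock(&p->lock);
}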
 obj-$(CONFIG_QEDI) := qedi.o
 qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
-		qedi_dbg.o
+		qedi_dbg.o qedi_fw_api.o
 qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
@@ -173,6 +173,7 @@ enum tcp_seg_placement_event {
 	TCP_EVENT_ADD_ISLE_RIGHT,
 	TCP_EVENT_ADD_ISLE_LEFT,
 	TCP_EVENT_JOIN,
+	TCP_EVENT_DELETE_ISLES,
 	TCP_EVENT_NOP,
 	MAX_TCP_SEG_PLACEMENT_EVENT
 };
...