Commit fbbfa34c authored by David S. Miller's avatar David S. Miller

Merge branch 'qed-fixes'

Yuval Mintz says:

====================
qed: Fix dependencies and warnings series

The first patch in this series follows Dan Carpenter's reports about
Smatch warnings for recent qed additions and fixes those.

The second patch is the most significant one [and the reason this is
intended for 'net'] - it's based on Arnd Bergmann's suggestion for fixing
compilation issues that were introduced with the roce addition as a result
of certain combinations of qed, qede and qedr Kconfig options.

The third follows the discussion with Arnd and clears a lot of the warnings
that arise when compiling the drivers with "C=1".

Please consider applying this series to 'net'.
====================
Acked-by: default avatarArnd Bergmann <arnd@arndb.de>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 76506a98 8c93beaf
...@@ -107,10 +107,14 @@ config QEDE ...@@ -107,10 +107,14 @@ config QEDE
---help--- ---help---
This enables the support for ... This enables the support for ...
config QED_RDMA
bool
config INFINIBAND_QEDR config INFINIBAND_QEDR
tristate "QLogic qede RoCE sources [debug]" tristate "QLogic qede RoCE sources [debug]"
depends on QEDE && 64BIT depends on QEDE && 64BIT
select QED_LL2 select QED_LL2
select QED_RDMA
default n default n
---help--- ---help---
This provides a temporary node that allows the compilation This provides a temporary node that allows the compilation
......
...@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ ...@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o qed-$(CONFIG_QED_RDMA) += qed_roce.o
...@@ -47,13 +47,8 @@ ...@@ -47,13 +47,8 @@
#define TM_ALIGN BIT(TM_SHIFT) #define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4 #define TM_ELEM_SIZE 4
/* ILT constants */
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ /* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
#define ILT_DEFAULT_HW_P_SIZE 4 #define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#else
#define ILT_DEFAULT_HW_P_SIZE 3
#endif
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
...@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn, ...@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL; return NULL;
} }
void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{ {
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs; p_mgr->srq_count = num_srqs;
} }
u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{ {
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
...@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) ...@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
return 0; return 0;
} }
void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
struct qed_rdma_pf_params *p_params) struct qed_rdma_pf_params *p_params)
{ {
u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs; u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
enum protocol_type proto; enum protocol_type proto;
......
...@@ -405,7 +405,7 @@ struct phy_defs { ...@@ -405,7 +405,7 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/ /***************************** Constant Arrays *******************************/
/* Debug arrays */ /* Debug arrays */
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} }; static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */ /* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
...@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, ...@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
} }
/* Dump MCP Trace */ /* Dump MCP Trace */
enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 *dump_buf, u32 *dump_buf,
bool dump, u32 *num_dumped_dwords) bool dump, u32 *num_dumped_dwords)
{ {
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
u32 trace_meta_size_dwords, running_bundle_id, offset = 0; u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
...@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, ...@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
} }
/* Dump GRC FIFO */ /* Dump GRC FIFO */
enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 *dump_buf, u32 *dump_buf,
bool dump, u32 *num_dumped_dwords) bool dump, u32 *num_dumped_dwords)
{ {
u32 offset = 0, dwords_read, size_param_offset; u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data; bool fifo_has_data;
...@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, ...@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
} }
/* Dump IGU FIFO */ /* Dump IGU FIFO */
enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 *dump_buf, u32 *dump_buf,
bool dump, u32 *num_dumped_dwords) bool dump, u32 *num_dumped_dwords)
{ {
u32 offset = 0, dwords_read, size_param_offset; u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data; bool fifo_has_data;
...@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, ...@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
} }
/* Protection Override dump */ /* Protection Override dump */
enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
u32 *dump_buf, u32 *dump_buf,
bool dump, u32 *num_dumped_dwords) bool dump,
u32 *num_dumped_dwords)
{ {
u32 offset = 0, size_param_offset, override_window_dwords; u32 offset = 0, size_param_offset, override_window_dwords;
...@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, ...@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
} }
/* Wrapper for unifying the idle_chk and mcp_trace api */ /* Wrapper for unifying the idle_chk and mcp_trace api */
enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, static enum dbg_status
u32 *dump_buf, qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
u32 num_dumped_dwords, u32 *dump_buf,
char *results_buf) u32 num_dumped_dwords,
char *results_buf)
{ {
u32 num_errors, num_warnnings; u32 num_errors, num_warnnings;
...@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) ...@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
#define QED_RESULTS_BUF_MIN_SIZE 16 #define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info */ /* Generic function for decoding debug feature info */
enum dbg_status format_feature(struct qed_hwfn *p_hwfn, static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx) enum qed_dbg_features feature_idx)
{ {
struct qed_dbg_feature *feature = struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx]; &p_hwfn->cdev->dbg_params.features[feature_idx];
...@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn, ...@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
} }
/* Generic function for performing the dump of a debug feature. */ /* Generic function for performing the dump of a debug feature. */
enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
enum qed_dbg_features feature_idx) struct qed_ptt *p_ptt,
enum qed_dbg_features feature_idx)
{ {
struct qed_dbg_feature *feature = struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx]; &p_hwfn->cdev->dbg_params.features[feature_idx];
......
...@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev) ...@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ROCE, PROTOCOLID_ROCE,
0) * 2; NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons = num_cons =
qed_cxt_get_proto_cid_count(p_hwfn, qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ISCSI, 0); PROTOCOLID_ISCSI,
NULL);
n_eqes += 2 * num_cons; n_eqes += 2 * num_cons;
} }
...@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num; u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1; int num_features = 1;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) if (IS_ENABLED(CONFIG_QED_RDMA) &&
/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
* status blocks equally between L2 / RoCE but with consideration as /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
* to how many l2 queues / cnqs we have * the status blocks equally between L2 / RoCE but with
*/ * consideration as to how many l2 queues / cnqs we have.
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { */
num_features++; num_features++;
feat_num[QED_RDMA_CNQ] = feat_num[QED_RDMA_CNQ] =
min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
} }
#endif
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features, num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE)); RESC_NUM(p_hwfn, QED_L2_QUEUE));
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_roce.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
...@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) ...@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer); qed_ll2_dealloc_buffer(cdev, buffer);
} }
void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle, u8 connection_handle,
struct qed_ll2_rx_packet *p_pkt, struct qed_ll2_rx_packet *p_pkt,
struct core_rx_fast_path_cqe *p_cqe, struct core_rx_fast_path_cqe *p_cqe,
bool b_last_packet) bool b_last_packet)
{ {
u16 packet_length = le16_to_cpu(p_cqe->packet_length); u16 packet_length = le16_to_cpu(p_cqe->packet_length);
struct qed_ll2_buffer *buffer = p_pkt->cookie; struct qed_ll2_buffer *buffer = p_pkt->cookie;
...@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) ...@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc; return rc;
} }
void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{ {
struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL; struct qed_ll2_rx_packet *p_pkt = NULL;
...@@ -1123,9 +1124,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, ...@@ -1123,9 +1124,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(start_bd->addr, first_frag); DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len); start_bd->nbytes = cpu_to_le16(first_frag_len);
SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
type);
DP_VERBOSE(p_hwfn, DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2), (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
......
...@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn, ...@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
*/ */
void qed_ll2_free(struct qed_hwfn *p_hwfn, void qed_ll2_free(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_connections); struct qed_ll2_info *p_ll2_connections);
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo, bool b_last_packet);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
#endif #endif
...@@ -33,10 +33,8 @@ ...@@ -33,10 +33,8 @@
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_selftest.h" #include "qed_selftest.h"
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#define QED_ROCE_QPS (8192) #define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8) #define QED_ROCE_DPIS (8)
#endif
static char version[] = static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
...@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, ...@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode) enum qed_int_mode int_mode)
{ {
struct qed_sb_cnt_info sb_cnt_info; struct qed_sb_cnt_info sb_cnt_info;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) int num_l2_queues = 0;
int num_l2_queues;
#endif
int rc; int rc;
int i; int i;
...@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, ...@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns; cdev->num_hwfns;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) if (!IS_ENABLED(CONFIG_QED_RDMA))
num_l2_queues = 0; return 0;
for_each_hwfn(cdev, i) for_each_hwfn(cdev, i)
num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
...@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, ...@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
cdev->int_params.rdma_msix_cnt, cdev->int_params.rdma_msix_cnt,
cdev->int_params.rdma_msix_base); cdev->int_params.rdma_msix_base);
#endif
return 0; return 0;
} }
...@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev, ...@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{ {
int i; int i;
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
#endif
for (i = 0; i < cdev->num_hwfns; i++) { for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
p_hwfn->pf_params = *params; p_hwfn->pf_params = *params;
} }
if (!IS_ENABLED(CONFIG_QED_RDMA))
return;
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
} }
static int qed_slowpath_start(struct qed_dev *cdev, static int qed_slowpath_start(struct qed_dev *cdev,
...@@ -1432,7 +1430,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) ...@@ -1432,7 +1430,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status; return status;
} }
struct qed_selftest_ops qed_selftest_ops_pass = { static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory, .selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt, .selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register, .selftest_register = &qed_selftest_register,
......
This diff is collapsed.
...@@ -95,26 +95,6 @@ struct qed_rdma_info { ...@@ -95,26 +95,6 @@ struct qed_rdma_info {
enum protocol_type proto; enum protocol_type proto;
}; };
struct qed_rdma_resize_cq_in_params {
u16 icid;
u32 cq_size;
bool pbl_two_level;
u64 pbl_ptr;
u16 pbl_num_pages;
u8 pbl_page_size_log;
};
struct qed_rdma_resize_cq_out_params {
u32 prod;
u32 cons;
};
struct qed_rdma_resize_cnq_in_params {
u32 cnq_id;
u32 pbl_page_size_log;
u64 pbl_ptr;
};
struct qed_rdma_qp { struct qed_rdma_qp {
struct regpair qp_handle; struct regpair qp_handle;
struct regpair qp_handle_async; struct regpair qp_handle_async;
...@@ -181,36 +161,55 @@ struct qed_rdma_qp { ...@@ -181,36 +161,55 @@ struct qed_rdma_qp {
dma_addr_t shared_queue_phys_addr; dma_addr_t shared_queue_phys_addr;
}; };
int #if IS_ENABLED(CONFIG_QED_RDMA)
qed_rdma_add_user(void *rdma_cxt, void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
struct qed_rdma_add_user_out_params *out_params);
int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
int
qed_rdma_register_tid(void *rdma_cxt,
struct qed_rdma_register_tid_in_params *params);
void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
int qed_rdma_stop(void *rdma_cxt);
u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
void qed_async_roce_event(struct qed_hwfn *p_hwfn, void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe); struct event_ring_entry *p_eqe);
int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp); void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp, u8 connection_handle,
struct qed_rdma_modify_qp_in_params *params); void *cookie,
int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp, dma_addr_t first_frag_addr,
struct qed_rdma_query_qp_out_params *out_params); bool b_last_fragment, bool b_last_packet);
void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) u8 connection_handle,
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment, bool b_last_packet);
void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo, bool b_last_packet);
#else #else
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet) {}
static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet) {}
static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
u16 data_length,
u8 data_length_error,
u16 parse_flags,
u16 vlan,
u32 src_mac_addr_hi,
u16 src_mac_addr_lo,
bool b_last_packet) {}
#endif #endif
#endif #endif
...@@ -80,7 +80,6 @@ union ramrod_data { ...@@ -80,7 +80,6 @@ union ramrod_data {
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct rdma_create_cq_ramrod_data rdma_create_cq; struct rdma_create_cq_ramrod_data rdma_create_cq;
struct rdma_resize_cq_ramrod_data rdma_resize_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq; struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
......
...@@ -28,9 +28,7 @@ ...@@ -28,9 +28,7 @@
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h" #include "qed_sriov.h"
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h" #include "qed_roce.h"
#endif
/*************************************************************************** /***************************************************************************
* Structures & Definitions * Structures & Definitions
...@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn, ...@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe) struct event_ring_entry *p_eqe)
{ {
switch (p_eqe->protocol_id) { switch (p_eqe->protocol_id) {
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
case PROTOCOLID_ROCE: case PROTOCOLID_ROCE:
qed_async_roce_event(p_hwfn, p_eqe); qed_async_roce_event(p_hwfn, p_eqe);
return 0; return 0;
#endif
case PROTOCOLID_COMMON: case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn, return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode, p_eqe->opcode,
......
...@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o ...@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o qede-$(CONFIG_DCB) += qede_dcbnl.o
qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o qede-$(CONFIG_QED_RDMA) += qede_roce.o
...@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv); ...@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
bool qede_roce_supported(struct qede_dev *dev); bool qede_roce_supported(struct qede_dev *dev);
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) #if IS_ENABLED(CONFIG_QED_RDMA)
int qede_roce_dev_add(struct qede_dev *dev); int qede_roce_dev_add(struct qede_dev *dev);
void qede_roce_dev_event_open(struct qede_dev *dev); void qede_roce_dev_event_open(struct qede_dev *dev);
void qede_roce_dev_event_close(struct qede_dev *dev); void qede_roce_dev_event_close(struct qede_dev *dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment