Commit 0e913f28 authored by David S. Miller

Merge branch 'mlxsw-Introduce-support-for-CQEv1-2'

Ido Schimmel says:

====================
mlxsw: Introduce support for CQEv1/2

Jiri says:

Current SwitchX2 and Spectrum FWs support CQEv0 and that is what we
implement in mlxsw. Spectrum FW also supports CQE v1 and v2.
However, Spectrum-2 won't support CQEv0. Prepare for it and set up the
CQE versions to use according to what is queried from the FW.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents faa1cd82 41107685
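
The version selection itself is implemented in the collapsed driver changes below. As a rough, illustrative sketch of the idea (the function name and boolean parameters here are made up; only the enum values come from the pci_hw.h hunk further down), the driver prefers the newest CQE version the firmware advertises and falls back to CQEv0 for firmware that predates the CQE resources:

static enum mlxsw_pci_cqe_v
pick_max_cqe_ver(bool fw_supports_cqe_v1, bool fw_supports_cqe_v2)
{
        /* Illustrative sketch only, not the driver's code. The booleans
         * stand in for the CQE_V1/CQE_V2 resources (0x2211/0x2212)
         * queried from the firmware, see the resources.h hunk below.
         */
        if (fw_supports_cqe_v2)
                return MLXSW_PCI_CQE_V2;
        if (fw_supports_cqe_v1)
                return MLXSW_PCI_CQE_V1;
        /* Older SwitchX-2/Spectrum firmware does not report the CQE
         * resources at all and only implements CQEv0.
         */
        return MLXSW_PCI_CQE_V0;
}

With that in place, Spectrum-2 firmware is free to drop CQEv0 support without breaking the driver.
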
@@ -424,10 +424,15 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
 MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
 
 /* cmd_mbox_query_aq_cap_log_max_cq_sz
- * Log (base 2) of max CQEs allowed on CQ.
+ * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1.
  */
 MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
 
+/* cmd_mbox_query_aq_cap_log_max_cqv2_sz
+ * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8);
+
 /* cmd_mbox_query_aq_cap_max_num_cqs
  * Maximum number of CQs.
  */
@@ -662,6 +667,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
 
+/* cmd_mbox_config_set_cqe_version
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
+
 /* cmd_mbox_config_profile_max_vepa_channels
  * Maximum number of VEPA channels per port (0 through 16)
  * 0 - multi-channel VEPA is disabled
@@ -841,6 +852,14 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
 MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
                      0x60, 0, 8, 0x08, 0x00, false);
 
+/* cmd_mbox_config_profile_cqe_version
+ * CQE version:
+ * 0: CQE version is 0
+ * 1: CQE version is either 1 or 2
+ * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8);
+
 /* ACCESS_REG - Access EMAD Supported Register
  * ----------------------------------
  * OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -1032,11 +1051,15 @@ static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
                               0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
 }
 
-/* cmd_mbox_sw2hw_cq_cv
+enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver {
+        MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1,
+        MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2,
+};
+
+/* cmd_mbox_sw2hw_cq_cqe_ver
  * CQE Version.
- * 0 - CQE Version 0, 1 - CQE Version 1
  */
-MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4);
 
 /* cmd_mbox_sw2hw_cq_c_eqn
  * Event Queue this CQ reports completion events to.
......
This diff is collapsed.
@@ -82,10 +82,12 @@
 #define MLXSW_PCI_AQ_PAGES 8
 #define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
 #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
-#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
 #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
 #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
-#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
+#define MLXSW_PCI_CQE2_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE)
 #define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
 
 #define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
@@ -126,10 +128,48 @@ MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
  */
 MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
 
+enum mlxsw_pci_cqe_v {
+        MLXSW_PCI_CQE_V0,
+        MLXSW_PCI_CQE_V1,
+        MLXSW_PCI_CQE_V2,
+};
+
+#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2) \
+static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
+{ \
+        switch (v) { \
+        default: \
+        case MLXSW_PCI_CQE_V0: \
+                return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
+        case MLXSW_PCI_CQE_V1: \
+                return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
+        case MLXSW_PCI_CQE_V2: \
+                return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
+        } \
+} \
+static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v, \
+                                              char *cqe, u32 val) \
+{ \
+        switch (v) { \
+        default: \
+        case MLXSW_PCI_CQE_V0: \
+                mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
+                break; \
+        case MLXSW_PCI_CQE_V1: \
+                mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
+                break; \
+        case MLXSW_PCI_CQE_V2: \
+                mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
+                break; \
+        } \
+}
+
 /* pci_cqe_lag
  * Packet arrives from a port which is a LAG
  */
-MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1);
+mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12);
 
 /* pci_cqe_system_port/lag_id
  * When lag=0: System port on which the packet was received
@@ -138,8 +178,12 @@ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
  * bits [3:0] sub_port on which the packet was received
  */
 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
-MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
-MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12);
+MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16);
+mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12);
+MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8);
+mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12);
 
 /* pci_cqe_wqe_counter
  * WQE count of the WQEs completed on the associated dqn
@@ -162,28 +206,38 @@ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
  * Length include CRC. Indicates the length field includes
  * the packet's CRC.
  */
-MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1);
+mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12);
 
 /* pci_cqe_e
  * CQE with Error.
  */
-MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
+mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);
 
 /* pci_cqe_sr
  * 1 - Send Queue
  * 0 - Receive Queue
  */
-MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1);
+mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12);
 
 /* pci_cqe_dqn
  * Descriptor Queue (DQ) Number.
  */
-MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
+mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
 
 /* pci_cqe_owner
  * Ownership bit.
  */
-MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1);
+mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2);
 
 /* pci_eqe_event_type
  * Event type.
......
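
For reference, the mlxsw_pci_cqe_item_helpers() invocations above generate version-dispatching accessors such as mlxsw_pci_cqe_lag_get(), mlxsw_pci_cqe_lag_id_get() and mlxsw_pci_cqe_lag_subport_get(). A minimal, hypothetical call site (the surrounding function is made up for illustration; only the generated helper names come from the diff) would pass the CQ's CQE version alongside the element:

static void demo_report_lag_info(enum mlxsw_pci_cqe_v v, char *cqe)
{
        /* Hypothetical usage sketch, not part of the patch set. The same
         * call site handles CQEv0, CQEv1 and CQEv2; the helper picks the
         * right per-version MLXSW_ITEM32() accessor at run time.
         */
        if (mlxsw_pci_cqe_lag_get(v, cqe))
                pr_debug("lag_id=%u subport=%u\n",
                         mlxsw_pci_cqe_lag_id_get(v, cqe),
                         mlxsw_pci_cqe_lag_subport_get(v, cqe));
}
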
@@ -43,6 +43,9 @@ enum mlxsw_res_id {
         MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
         MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
         MLXSW_RES_ID_MAX_TRAP_GROUPS,
+        MLXSW_RES_ID_CQE_V0,
+        MLXSW_RES_ID_CQE_V1,
+        MLXSW_RES_ID_CQE_V2,
         MLXSW_RES_ID_COUNTER_POOL_SIZE,
         MLXSW_RES_ID_MAX_SPAN,
         MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
@@ -81,6 +84,9 @@ static u16 mlxsw_res_ids[] = {
         [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
         [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
         [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+        [MLXSW_RES_ID_CQE_V0] = 0x2210,
+        [MLXSW_RES_ID_CQE_V1] = 0x2211,
+        [MLXSW_RES_ID_CQE_V2] = 0x2212,
         [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
         [MLXSW_RES_ID_MAX_SPAN] = 0x2420,
         [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
......