Commit a3348722 authored by Barak Witkowski's avatar Barak Witkowski Committed by David S. Miller

bnx2x: add afex support

Following patch adds afex multifunction support to the driver (afex
multifunction is based on vntag header) and updates FW version used to 7.2.51.

Support includes the following:

1. Configure vif parameters in firmware (default vlan, vif id, default
   priority, allowed priorities) according to values received from NIC.
2. Configure FW to strip/add default vlan according to afex vlan mode.
3. Notify link up to OS only after vif is fully initialized.
4. Support vif list set/get requests and configure FW accordingly.
5. Supply afex statistics upon request from NIC.
6. Special handling to L2 interface in case of FCoE vif.
Signed-off-by: Barak Witkowski <barak@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5b263f53
...@@ -1063,6 +1063,13 @@ struct bnx2x_slowpath { ...@@ -1063,6 +1063,13 @@ struct bnx2x_slowpath {
struct flow_control_configuration pfc_config; struct flow_control_configuration pfc_config;
} func_rdata; } func_rdata;
/* afex ramrod can not be a part of func_rdata union because these
* events might arrive in parallel to other events from func_rdata.
* Therefore, if they would have been defined in the same union,
* data can get corrupted.
*/
struct afex_vif_list_ramrod_data func_afex_rdata;
/* used by dmae command executer */ /* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C]; struct dmae_command dmae[MAX_DMAE_C];
...@@ -1179,6 +1186,7 @@ struct bnx2x_fw_stats_data { ...@@ -1179,6 +1186,7 @@ struct bnx2x_fw_stats_data {
enum { enum {
BNX2X_SP_RTNL_SETUP_TC, BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT, BNX2X_SP_RTNL_TX_TIMEOUT,
BNX2X_SP_RTNL_AFEX_F_UPDATE,
BNX2X_SP_RTNL_FAN_FAILURE, BNX2X_SP_RTNL_FAN_FAILURE,
}; };
...@@ -1343,13 +1351,14 @@ struct bnx2x { ...@@ -1343,13 +1351,14 @@ struct bnx2x {
struct cmng_init cmng; struct cmng_init cmng;
u32 mf_config[E1HVN_MAX]; u32 mf_config[E1HVN_MAX];
u32 mf2_config[E2_FUNC_MAX]; u32 mf_ext_config;
u32 path_has_ovlan; /* E3 */ u32 path_has_ovlan; /* E3 */
u16 mf_ov; u16 mf_ov;
u8 mf_mode; u8 mf_mode;
#define IS_MF(bp) (bp->mf_mode != 0) #define IS_MF(bp) (bp->mf_mode != 0)
#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX)
u8 wol; u8 wol;
...@@ -1592,6 +1601,9 @@ struct bnx2x { ...@@ -1592,6 +1601,9 @@ struct bnx2x {
struct dcbx_features dcbx_remote_feat; struct dcbx_features dcbx_remote_feat;
u32 dcbx_remote_flags; u32 dcbx_remote_flags;
#endif #endif
/* AFEX: store default vlan used */
int afex_def_vlan_tag;
enum mf_cfg_afex_vlan_mode afex_vlan_mode;
u32 pending_max; u32 pending_max;
/* multiple tx classes of service */ /* multiple tx classes of service */
...@@ -2148,9 +2160,16 @@ void bnx2x_notify_link_changed(struct bnx2x *bp); ...@@ -2148,9 +2160,16 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \
MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ #define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
#else
#define IS_MF_FCOE_AFEX(bp) false
#endif #endif
#endif /* bnx2x.h */ #endif /* bnx2x.h */
...@@ -1467,8 +1467,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp) ...@@ -1467,8 +1467,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = bnx2x_calc_num_queues(bp); bp->num_queues = bnx2x_calc_num_queues(bp);
#ifdef BCM_CNIC #ifdef BCM_CNIC
/* override in STORAGE SD mode */ /* override in STORAGE SD modes */
if (IS_MF_STORAGE_SD(bp)) if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
bp->num_queues = 1; bp->num_queues = 1;
#endif #endif
/* Add special queues */ /* Add special queues */
...@@ -1900,8 +1900,14 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) ...@@ -1900,8 +1900,14 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
SHMEM2_WR(bp, dcc_support, SHMEM2_WR(bp, dcc_support,
(SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
if (SHMEM2_HAS(bp, afex_driver_support))
SHMEM2_WR(bp, afex_driver_support,
SHMEM_AFEX_SUPPORTED_VERSION_ONE);
} }
/* Set AFEX default VLAN tag to an invalid value */
bp->afex_def_vlan_tag = -1;
bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
rc = bnx2x_func_start(bp); rc = bnx2x_func_start(bp);
if (rc) { if (rc) {
...@@ -3073,7 +3079,8 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) ...@@ -3073,7 +3079,8 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
} }
#ifdef BCM_CNIC #ifdef BCM_CNIC
if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) { if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
!is_zero_ether_addr(addr->sa_data)) {
BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
return -EINVAL; return -EINVAL;
} }
...@@ -3195,7 +3202,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) ...@@ -3195,7 +3202,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
int rx_ring_size = 0; int rx_ring_size = 0;
#ifdef BCM_CNIC #ifdef BCM_CNIC
if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) { if (!bp->rx_ring_size &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
rx_ring_size = MIN_RX_SIZE_NONTPA; rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size; bp->rx_ring_size = rx_ring_size;
} else } else
......
...@@ -1694,7 +1694,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) ...@@ -1694,7 +1694,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
if (is_valid_ether_addr(addr)) if (is_valid_ether_addr(addr))
return true; return true;
#ifdef BCM_CNIC #ifdef BCM_CNIC
if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp)) if (is_zero_ether_addr(addr) &&
(IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
return true; return true;
#endif #endif
return false; return false;
......
...@@ -1430,7 +1430,7 @@ static void bnx2x_get_ringparam(struct net_device *dev, ...@@ -1430,7 +1430,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
else else
ering->rx_pending = MAX_RX_AVAIL; ering->rx_pending = MAX_RX_AVAIL;
ering->tx_max_pending = MAX_TX_AVAIL; ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
ering->tx_pending = bp->tx_ring_size; ering->tx_pending = bp->tx_ring_size;
} }
...@@ -1448,7 +1448,7 @@ static int bnx2x_set_ringparam(struct net_device *dev, ...@@ -1448,7 +1448,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
if ((ering->rx_pending > MAX_RX_AVAIL) || if ((ering->rx_pending > MAX_RX_AVAIL) ||
(ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA)) || MIN_RX_SIZE_TPA)) ||
(ering->tx_pending > MAX_TX_AVAIL) || (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4)) { (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL; return -EINVAL;
......
...@@ -387,7 +387,7 @@ ...@@ -387,7 +387,7 @@
#define STATS_QUERY_CMD_COUNT 16 #define STATS_QUERY_CMD_COUNT 16
#define NIV_LIST_TABLE_SIZE 4096 #define AFEX_LIST_TABLE_SIZE 4096
#define INVALID_VNIC_ID 0xFF #define INVALID_VNIC_ID 0xFF
......
...@@ -833,6 +833,7 @@ struct shared_feat_cfg { /* NVRAM Offset */ ...@@ -833,6 +833,7 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
/* The interval in seconds between sending LLDP packets. Set to zero /* The interval in seconds between sending LLDP packets. Set to zero
to disable the feature */ to disable the feature */
...@@ -1235,6 +1236,8 @@ struct drv_func_mb { ...@@ -1235,6 +1236,8 @@ struct drv_func_mb {
#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
#define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
#define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
...@@ -1242,6 +1245,13 @@ struct drv_func_mb { ...@@ -1242,6 +1245,13 @@ struct drv_func_mb {
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000
#define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000
#define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000
#define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000
#define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000
#define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
#define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
...@@ -1299,6 +1309,14 @@ struct drv_func_mb { ...@@ -1299,6 +1309,14 @@ struct drv_func_mb {
#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
#define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000
#define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000
#define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000
#define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000
#define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000
#define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000
#define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
#define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
...@@ -1357,6 +1375,12 @@ struct drv_func_mb { ...@@ -1357,6 +1375,12 @@ struct drv_func_mb {
#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
#define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000
#define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000
#define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000
#define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000
#define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000
#define DRV_STATUS_DRV_INFO_REQ 0x04000000 #define DRV_STATUS_DRV_INFO_REQ 0x04000000
u32 virt_mac_upper; u32 virt_mac_upper;
...@@ -1448,7 +1472,26 @@ struct func_mf_cfg { ...@@ -1448,7 +1472,26 @@ struct func_mf_cfg {
#define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
u32 reserved[2]; /* afex default VLAN ID - 12 bits */
#define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000
#define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16
u32 afex_config;
#define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff
#define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0
#define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00
#define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8
#define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100
#define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000
#define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16
u32 reserved;
};
enum mf_cfg_afex_vlan_mode {
FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
}; };
/* This structure is not applicable and should not be accessed on 57711 */ /* This structure is not applicable and should not be accessed on 57711 */
...@@ -1945,18 +1988,29 @@ struct shmem2_region { ...@@ -1945,18 +1988,29 @@ struct shmem2_region {
u32 nvm_retain_bitmap_addr; /* 0x0070 */ u32 nvm_retain_bitmap_addr; /* 0x0070 */
u32 reserved1; /* 0x0074 */ /* afex support of that driver */
u32 afex_driver_support; /* 0x0074 */
#define SHMEM_AFEX_VERSION_MASK 0x100f
#define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001
#define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000
u32 reserved2[E2_FUNC_MAX]; /* driver receives addr in scratchpad to which it should respond */
u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ /* generic params from MCP to driver (value depends on the msg sent
u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ * to driver
*/
u32 afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */
u32 afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */
u32 swim_base_addr; /* 0x0108 */ u32 swim_base_addr; /* 0x0108 */
u32 swim_funcs; u32 swim_funcs;
u32 swim_main_cb; u32 swim_main_cb;
u32 reserved5[2]; /* bitmap notifying which VIF profiles stored in nvram are enabled by
* switch
*/
u32 afex_profiles_enabled[2];
/* generic flags controlled by the driver */ /* generic flags controlled by the driver */
u32 drv_flags; u32 drv_flags;
...@@ -2696,10 +2750,51 @@ union drv_info_to_mcp { ...@@ -2696,10 +2750,51 @@ union drv_info_to_mcp {
struct fcoe_stats_info fcoe_stat; struct fcoe_stats_info fcoe_stat;
struct iscsi_stats_info iscsi_stat; struct iscsi_stats_info iscsi_stat;
}; };
/* stats collected for afex.
 * NOTE: structure is exactly as expected to be received by the switch.
 * order must remain exactly as is unless protocol changes !
 *
 * Every counter is a 64-bit value carried as two consecutive 32-bit
 * words: <name>_hi holds the upper 32 bits, <name>_lo the lower 32.
 * Do not reorder, insert, or remove fields - the switch parses this
 * buffer positionally.
 */
struct afex_stats {
u32 tx_unicast_frames_hi;
u32 tx_unicast_frames_lo;
u32 tx_unicast_bytes_hi;
u32 tx_unicast_bytes_lo;
u32 tx_multicast_frames_hi;
u32 tx_multicast_frames_lo;
u32 tx_multicast_bytes_hi;
u32 tx_multicast_bytes_lo;
u32 tx_broadcast_frames_hi;
u32 tx_broadcast_frames_lo;
u32 tx_broadcast_bytes_hi;
u32 tx_broadcast_bytes_lo;
u32 tx_frames_discarded_hi;
u32 tx_frames_discarded_lo;
u32 tx_frames_dropped_hi;
u32 tx_frames_dropped_lo;
u32 rx_unicast_frames_hi;
u32 rx_unicast_frames_lo;
u32 rx_unicast_bytes_hi;
u32 rx_unicast_bytes_lo;
u32 rx_multicast_frames_hi;
u32 rx_multicast_frames_lo;
u32 rx_multicast_bytes_hi;
u32 rx_multicast_bytes_lo;
u32 rx_broadcast_frames_hi;
u32 rx_broadcast_frames_lo;
u32 rx_broadcast_bytes_hi;
u32 rx_broadcast_bytes_lo;
u32 rx_frames_discarded_hi;
u32 rx_frames_discarded_lo;
u32 rx_frames_dropped_hi;
u32 rx_frames_dropped_lo;
};
#define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 2 #define BCM_5710_FW_MINOR_VERSION 2
#define BCM_5710_FW_REVISION_VERSION 16 #define BCM_5710_FW_REVISION_VERSION 51
#define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1 #define BCM_5710_FW_COMPILE_FLAGS 1
...@@ -3389,7 +3484,7 @@ struct client_init_tx_data { ...@@ -3389,7 +3484,7 @@ struct client_init_tx_data {
#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) #define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 #define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
u8 default_vlan_flg; u8 default_vlan_flg;
u8 reserved2; u8 force_default_pri_flg;
__le32 reserved3; __le32 reserved3;
}; };
...@@ -4374,9 +4469,22 @@ struct fcoe_statistics_params { ...@@ -4374,9 +4469,22 @@ struct fcoe_statistics_params {
}; };
/*
 * The data afex vif list ramrod need
 *
 * Layout is consumed directly by FW (sent inline in the SPQ element,
 * not via a DMA-mapped buffer - see bnx2x_func_send_afex_viflists()),
 * so field order and sizes must not change.
 */
struct afex_vif_list_ramrod_data {
u8 afex_vif_list_command; /* presumably a vif_list_rule_kind value - confirm against FW spec */
u8 func_bit_map; /* bitmap of functions the command applies to */
__le16 vif_list_index;
u8 func_to_clear; /* function to remove, for the CLEAR_FUNC command - verify */
u8 echo; /* opaque cookie; driver sets it to the command and FW echoes it back in vif_list_event_data.echo */
__le16 reserved1;
};
/* /*
* cfc delete event data * cfc delete event data
*/ */
struct cfc_del_event_data { struct cfc_del_event_data {
u32 cid; u32 cid;
u32 reserved0; u32 reserved0;
...@@ -4521,7 +4629,7 @@ enum common_spqe_cmd_id { ...@@ -4521,7 +4629,7 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_STAT_QUERY, RAMROD_CMD_ID_COMMON_STAT_QUERY,
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC, RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_RESERVED1, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
MAX_COMMON_SPQE_CMD_ID MAX_COMMON_SPQE_CMD_ID
}; };
...@@ -4728,6 +4836,17 @@ struct malicious_vf_event_data { ...@@ -4728,6 +4836,17 @@ struct malicious_vf_event_data {
u32 reserved3; u32 reserved3;
}; };
/*
 * vif list event data
 *
 * Completion payload FW returns for an AFEX_VIF_LISTS ramrod
 * (EVENT_RING_OPCODE_AFEX_VIF_LISTS); delivered via union event_data.
 */
struct vif_list_event_data {
u8 func_bit_map; /* result bitmap (e.g. of a VIF_LIST_RULE_GET query) - confirm semantics */
u8 echo; /* echo of afex_vif_list_ramrod_data.echo, i.e. the originating sub-command */
__le16 reserved0;
__le32 reserved1;
__le32 reserved2;
};
/* /*
* union for all event ring message types * union for all event ring message types
*/ */
...@@ -4737,6 +4856,7 @@ union event_data { ...@@ -4737,6 +4856,7 @@ union event_data {
struct cfc_del_event_data cfc_del_event; struct cfc_del_event_data cfc_del_event;
struct vf_flr_event_data vf_flr_event; struct vf_flr_event_data vf_flr_event;
struct malicious_vf_event_data malicious_vf_event; struct malicious_vf_event_data malicious_vf_event;
struct vif_list_event_data vif_list_event;
}; };
...@@ -4802,7 +4922,7 @@ enum event_ring_opcode { ...@@ -4802,7 +4922,7 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_FORWARD_SETUP, EVENT_RING_OPCODE_FORWARD_SETUP,
EVENT_RING_OPCODE_RSS_UPDATE_RULES, EVENT_RING_OPCODE_RSS_UPDATE_RULES,
EVENT_RING_OPCODE_FUNCTION_UPDATE, EVENT_RING_OPCODE_FUNCTION_UPDATE,
EVENT_RING_OPCODE_RESERVED1, EVENT_RING_OPCODE_AFEX_VIF_LISTS,
EVENT_RING_OPCODE_SET_MAC, EVENT_RING_OPCODE_SET_MAC,
EVENT_RING_OPCODE_CLASSIFICATION_RULES, EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES, EVENT_RING_OPCODE_FILTERS_RULES,
...@@ -4849,12 +4969,27 @@ struct flow_control_configuration { ...@@ -4849,12 +4969,27 @@ struct flow_control_configuration {
struct function_start_data { struct function_start_data {
__le16 function_mode; __le16 function_mode;
__le16 sd_vlan_tag; __le16 sd_vlan_tag;
u16 reserved; __le16 vif_id;
u8 path_id; u8 path_id;
u8 network_cos_mode; u8 network_cos_mode;
}; };
/* Ramrod data for COMMON_FUNCTION_UPDATE.
 * Each *_change_flg is set to 1 when the corresponding value field below
 * is valid and should be applied by FW (see bnx2x_func_send_afex_update(),
 * which sets the flag together with the value); fields whose flag is 0
 * are left untouched by FW - NOTE(review): confirm against FW spec.
 */
struct function_update_data {
u8 vif_id_change_flg;
u8 afex_default_vlan_change_flg;
u8 allowed_priorities_change_flg;
u8 network_cos_mode_change_flg;
__le16 vif_id;
__le16 afex_default_vlan; /* default VLAN tag FW strips/adds in AFEX mode */
u8 allowed_priorities;
u8 network_cos_mode;
u8 lb_mode_en;
u8 reserved0;
__le32 reserved1;
};
/* /*
* FW version stored in the Xstorm RAM * FW version stored in the Xstorm RAM
*/ */
...@@ -5052,7 +5187,7 @@ enum mf_mode { ...@@ -5052,7 +5187,7 @@ enum mf_mode {
SINGLE_FUNCTION, SINGLE_FUNCTION,
MULTI_FUNCTION_SD, MULTI_FUNCTION_SD,
MULTI_FUNCTION_SI, MULTI_FUNCTION_SI,
MULTI_FUNCTION_RESERVED, MULTI_FUNCTION_AFEX,
MAX_MF_MODE MAX_MF_MODE
}; };
...@@ -5177,6 +5312,7 @@ union protocol_common_specific_data { ...@@ -5177,6 +5312,7 @@ union protocol_common_specific_data {
u8 protocol_data[8]; u8 protocol_data[8];
struct regpair phy_address; struct regpair phy_address;
struct regpair mac_config_addr; struct regpair mac_config_addr;
struct afex_vif_list_ramrod_data afex_vif_list_data;
}; };
/* /*
...@@ -5355,6 +5491,18 @@ enum vf_pf_channel_state { ...@@ -5355,6 +5491,18 @@ enum vf_pf_channel_state {
}; };
/*
 * vif_list_rule_kind
 *
 * Sub-commands carried in afex_vif_list_ramrod_data.afex_vif_list_command
 * for the AFEX_VIF_LISTS ramrod. Values are part of the FW interface;
 * do not renumber.
 */
enum vif_list_rule_kind {
VIF_LIST_RULE_SET,
VIF_LIST_RULE_GET,
VIF_LIST_RULE_CLEAR_ALL,
VIF_LIST_RULE_CLEAR_FUNC,
MAX_VIF_LIST_RULE_KIND
};
/* /*
* zone A per-queue data * zone A per-queue data
*/ */
......
...@@ -125,7 +125,7 @@ enum { ...@@ -125,7 +125,7 @@ enum {
MODE_MF = 0x00000100, MODE_MF = 0x00000100,
MODE_MF_SD = 0x00000200, MODE_MF_SD = 0x00000200,
MODE_MF_SI = 0x00000400, MODE_MF_SI = 0x00000400,
MODE_MF_NIV = 0x00000800, MODE_MF_AFEX = 0x00000800,
MODE_E3_A0 = 0x00001000, MODE_E3_A0 = 0x00001000,
MODE_E3_B0 = 0x00002000, MODE_E3_B0 = 0x00002000,
MODE_COS3 = 0x00004000, MODE_COS3 = 0x00004000,
......
...@@ -6800,6 +6800,10 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) ...@@ -6800,6 +6800,10 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
else else
rc = bnx2x_update_link_down(params, vars); rc = bnx2x_update_link_down(params, vars);
/* Update MCP link status was changed */
if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
return rc; return rc;
} }
......
...@@ -254,6 +254,7 @@ struct link_params { ...@@ -254,6 +254,7 @@ struct link_params {
#define FEATURE_CONFIG_PFC_ENABLED (1<<1) #define FEATURE_CONFIG_PFC_ENABLED (1<<1)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
#define FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) #define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) #define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11) #define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
......
...@@ -633,14 +633,17 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) ...@@ -633,14 +633,17 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
} }
static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, void bnx2x_set_mac_in_nig(struct bnx2x *bp,
bool add, unsigned char *dev_addr, int index) bool add, unsigned char *dev_addr, int index)
{ {
u32 wb_data[2]; u32 wb_data[2];
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
NIG_REG_LLH0_FUNC_MEM; NIG_REG_LLH0_FUNC_MEM;
if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE) if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
return;
if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
return; return;
DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
...@@ -4398,6 +4401,9 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, ...@@ -4398,6 +4401,9 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
tx_data->anti_spoofing_flg = tx_data->anti_spoofing_flg =
test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index; tx_data->tx_sb_index_number = params->sb_cq_index;
tx_data->tss_leading_client_id = params->tss_leading_cl_id; tx_data->tss_leading_client_id = params->tss_leading_cl_id;
...@@ -5325,6 +5331,17 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, ...@@ -5325,6 +5331,17 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
case BNX2X_F_STATE_STARTED: case BNX2X_F_STATE_STARTED:
if (cmd == BNX2X_F_CMD_STOP) if (cmd == BNX2X_F_CMD_STOP)
next_state = BNX2X_F_STATE_INITIALIZED; next_state = BNX2X_F_STATE_INITIALIZED;
/* afex ramrods can be sent only in started mode, and only
* if not pending for function_stop ramrod completion
* for these events - next state remained STARTED.
*/
else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
(!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
next_state = BNX2X_F_STATE_STARTED;
else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
(!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
next_state = BNX2X_F_STATE_STARTED;
else if (cmd == BNX2X_F_CMD_TX_STOP) else if (cmd == BNX2X_F_CMD_TX_STOP)
next_state = BNX2X_F_STATE_TX_STOPPED; next_state = BNX2X_F_STATE_TX_STOPPED;
...@@ -5612,6 +5629,83 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, ...@@ -5612,6 +5629,83 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
U64_LO(data_mapping), NONE_CONNECTION_TYPE); U64_LO(data_mapping), NONE_CONNECTION_TYPE);
} }
/**
 * bnx2x_func_send_afex_update - post an AFEX function-update ramrod
 *
 * @bp:		driver handle
 * @params:	carries params.afex_update (vif_id, default VLAN,
 *		allowed priorities) and the function SP object
 *
 * Builds a function_update_data block in the dedicated AFEX ramrod
 * buffer (o->afex_rdata - kept separate from func_rdata because AFEX
 * events may run in parallel with other function ramrods) and posts a
 * COMMON_FUNCTION_UPDATE SPQ element referencing it by DMA address.
 * All three change flags are set so FW applies every supplied value.
 *
 * Returns the bnx2x_sp_post() result (0 on success, negative errno
 * otherwise).
 */
static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
					 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_update_data *rdata =
		(struct function_update_data *)o->afex_rdata;
	dma_addr_t data_mapping = o->afex_rdata_mapping;
	struct bnx2x_func_afex_update_params *afex_update_params =
		&params->params.afex_update;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_id_change_flg = 1;
	rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
	rdata->afex_default_vlan_change_flg = 1;
	rdata->afex_default_vlan =
		cpu_to_le16(afex_update_params->afex_default_vlan);
	rdata->allowed_priorities_change_flg = 1;
	rdata->allowed_priorities = afex_update_params->allowed_priorities;

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */
	/* Print host-order values: the rdata fields are __le16, so dumping
	 * them raw would be byte-swapped on big-endian machines.
	 */
	DP(BNX2X_MSG_SP,
	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
	   le16_to_cpu(rdata->vif_id),
	   le16_to_cpu(rdata->afex_default_vlan),
	   rdata->allowed_priorities);

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
/**
 * bnx2x_func_send_afex_viflists - post an AFEX VIF-lists ramrod
 *
 * @bp:		driver handle
 * @params:	carries params.afex_viflists (command, list index,
 *		function bitmap, function to clear)
 *
 * Fills the AFEX ramrod buffer with an afex_vif_list_ramrod_data block
 * and posts a COMMON_AFEX_VIF_LISTS SPQ element. Unlike most ramrods,
 * the 8-byte payload is passed inline in the SPQ element (hi/lo words
 * of *rdata), not by DMA address.
 *
 * Fix: vif_list_index is __le16 in the ramrod data, so it must be
 * stored with cpu_to_le16() - the previous raw u16 assignment produced
 * a byte-swapped index on big-endian hosts.
 *
 * Returns the bnx2x_sp_post() result (0 on success, negative errno
 * otherwise).
 */
static
inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
					 struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct bnx2x_func_afex_viflists_params *afex_viflist_params =
		&params->params.afex_viflists;
	u64 *p_rdata = (u64 *)rdata;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index =
		cpu_to_le16(afex_viflist_params->vif_list_index);
	rdata->func_bit_map = afex_viflist_params->func_bit_map;
	rdata->afex_vif_list_command =
		afex_viflist_params->afex_vif_list_command;
	rdata->func_to_clear = afex_viflist_params->func_to_clear;

	/* send in echo type of sub command */
	rdata->echo = afex_viflist_params->afex_vif_list_command;

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */
	DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
	   rdata->afex_vif_list_command,
	   le16_to_cpu(rdata->vif_list_index),
	   rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     U64_HI(*p_rdata), U64_LO(*p_rdata),
			     NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_stop(struct bnx2x *bp, static inline int bnx2x_func_send_stop(struct bnx2x *bp,
struct bnx2x_func_state_params *params) struct bnx2x_func_state_params *params)
{ {
...@@ -5663,6 +5757,10 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp, ...@@ -5663,6 +5757,10 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
return bnx2x_func_send_stop(bp, params); return bnx2x_func_send_stop(bp, params);
case BNX2X_F_CMD_HW_RESET: case BNX2X_F_CMD_HW_RESET:
return bnx2x_func_hw_reset(bp, params); return bnx2x_func_hw_reset(bp, params);
case BNX2X_F_CMD_AFEX_UPDATE:
return bnx2x_func_send_afex_update(bp, params);
case BNX2X_F_CMD_AFEX_VIFLISTS:
return bnx2x_func_send_afex_viflists(bp, params);
case BNX2X_F_CMD_TX_STOP: case BNX2X_F_CMD_TX_STOP:
return bnx2x_func_send_tx_stop(bp, params); return bnx2x_func_send_tx_stop(bp, params);
case BNX2X_F_CMD_TX_START: case BNX2X_F_CMD_TX_START:
...@@ -5676,6 +5774,7 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp, ...@@ -5676,6 +5774,7 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
void bnx2x_init_func_obj(struct bnx2x *bp, void bnx2x_init_func_obj(struct bnx2x *bp,
struct bnx2x_func_sp_obj *obj, struct bnx2x_func_sp_obj *obj,
void *rdata, dma_addr_t rdata_mapping, void *rdata, dma_addr_t rdata_mapping,
void *afex_rdata, dma_addr_t afex_rdata_mapping,
struct bnx2x_func_sp_drv_ops *drv_iface) struct bnx2x_func_sp_drv_ops *drv_iface)
{ {
memset(obj, 0, sizeof(*obj)); memset(obj, 0, sizeof(*obj));
...@@ -5684,7 +5783,8 @@ void bnx2x_init_func_obj(struct bnx2x *bp, ...@@ -5684,7 +5783,8 @@ void bnx2x_init_func_obj(struct bnx2x *bp,
obj->rdata = rdata; obj->rdata = rdata;
obj->rdata_mapping = rdata_mapping; obj->rdata_mapping = rdata_mapping;
obj->afex_rdata = afex_rdata;
obj->afex_rdata_mapping = afex_rdata_mapping;
obj->send_cmd = bnx2x_func_send_cmd; obj->send_cmd = bnx2x_func_send_cmd;
obj->check_transition = bnx2x_func_chk_transition; obj->check_transition = bnx2x_func_chk_transition;
obj->complete_cmd = bnx2x_func_comp_cmd; obj->complete_cmd = bnx2x_func_comp_cmd;
......
...@@ -62,6 +62,8 @@ enum { ...@@ -62,6 +62,8 @@ enum {
BNX2X_FILTER_MCAST_PENDING, BNX2X_FILTER_MCAST_PENDING,
BNX2X_FILTER_MCAST_SCHED, BNX2X_FILTER_MCAST_SCHED,
BNX2X_FILTER_RSS_CONF_PENDING, BNX2X_FILTER_RSS_CONF_PENDING,
BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
}; };
struct bnx2x_raw_obj { struct bnx2x_raw_obj {
...@@ -432,6 +434,8 @@ enum { ...@@ -432,6 +434,8 @@ enum {
BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
}; };
void bnx2x_set_mac_in_nig(struct bnx2x *bp,
bool add, unsigned char *dev_addr, int index);
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
...@@ -798,7 +802,8 @@ enum { ...@@ -798,7 +802,8 @@ enum {
BNX2X_Q_FLG_TX_SWITCH, BNX2X_Q_FLG_TX_SWITCH,
BNX2X_Q_FLG_TX_SEC, BNX2X_Q_FLG_TX_SEC,
BNX2X_Q_FLG_ANTI_SPOOF, BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM BNX2X_Q_FLG_SILENT_VLAN_REM,
BNX2X_Q_FLG_FORCE_DEFAULT_PRI
}; };
/* Queue type options: queue type may be a compination of below. */ /* Queue type options: queue type may be a compination of below. */
...@@ -960,6 +965,11 @@ struct bnx2x_queue_state_params { ...@@ -960,6 +965,11 @@ struct bnx2x_queue_state_params {
} params; } params;
}; };
/* Host-side result holder for a VIF-list query; presumably filled from
 * vif_list_event_data (echo / func_bit_map) on ramrod completion -
 * confirm against the event-handling path.
 */
struct bnx2x_viflist_params {
u8 echo_res;
u8 func_bit_map_res;
};
struct bnx2x_queue_sp_obj { struct bnx2x_queue_sp_obj {
u32 cids[BNX2X_MULTI_TX_COS]; u32 cids[BNX2X_MULTI_TX_COS];
u8 cl_id; u8 cl_id;
...@@ -1042,6 +1052,8 @@ enum bnx2x_func_cmd { ...@@ -1042,6 +1052,8 @@ enum bnx2x_func_cmd {
BNX2X_F_CMD_START, BNX2X_F_CMD_START,
BNX2X_F_CMD_STOP, BNX2X_F_CMD_STOP,
BNX2X_F_CMD_HW_RESET, BNX2X_F_CMD_HW_RESET,
BNX2X_F_CMD_AFEX_UPDATE,
BNX2X_F_CMD_AFEX_VIFLISTS,
BNX2X_F_CMD_TX_STOP, BNX2X_F_CMD_TX_STOP,
BNX2X_F_CMD_TX_START, BNX2X_F_CMD_TX_START,
BNX2X_F_CMD_MAX, BNX2X_F_CMD_MAX,
...@@ -1086,6 +1098,18 @@ struct bnx2x_func_start_params { ...@@ -1086,6 +1098,18 @@ struct bnx2x_func_start_params {
u8 network_cos_mode; u8 network_cos_mode;
}; };
/* Parameters for a BNX2X_F_CMD_AFEX_UPDATE function ramrod: the vif
 * configuration values (received from the NIC) to be programmed into FW.
 */
struct bnx2x_func_afex_update_params {
	u16 vif_id;		/* vif id assigned to this function */
	u16 afex_default_vlan;	/* default (vntag) vlan for this vif */
	u8 allowed_priorities;	/* bitmap of allowed priorities - TODO confirm encoding */
};
/* Parameters for a BNX2X_F_CMD_AFEX_VIFLISTS function ramrod
 * (vif-list set/get/clear requests arriving from the NIC).
 */
struct bnx2x_func_afex_viflists_params {
	u16 vif_list_index;	/* index of the vif list to operate on */
	u8 func_bit_map;	/* function bitmap, used by SET commands */
	u8 afex_vif_list_command;	/* FW vif-list opcode - TODO confirm enum */
	u8 func_to_clear;	/* function whose bit to clear, for CLEAR commands */
};
struct bnx2x_func_tx_start_params { struct bnx2x_func_tx_start_params {
struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
u8 dcb_enabled; u8 dcb_enabled;
...@@ -1107,6 +1131,8 @@ struct bnx2x_func_state_params { ...@@ -1107,6 +1131,8 @@ struct bnx2x_func_state_params {
struct bnx2x_func_hw_init_params hw_init; struct bnx2x_func_hw_init_params hw_init;
struct bnx2x_func_hw_reset_params hw_reset; struct bnx2x_func_hw_reset_params hw_reset;
struct bnx2x_func_start_params start; struct bnx2x_func_start_params start;
struct bnx2x_func_afex_update_params afex_update;
struct bnx2x_func_afex_viflists_params afex_viflists;
struct bnx2x_func_tx_start_params tx_start; struct bnx2x_func_tx_start_params tx_start;
} params; } params;
}; };
...@@ -1151,6 +1177,13 @@ struct bnx2x_func_sp_obj { ...@@ -1151,6 +1177,13 @@ struct bnx2x_func_sp_obj {
void *rdata; void *rdata;
dma_addr_t rdata_mapping; dma_addr_t rdata_mapping;
/* Buffer to use as a afex ramrod data and its mapping.
* This can't be same rdata as above because afex ramrod requests
* can arrive to the object in parallel to other ramrod requests.
*/
void *afex_rdata;
dma_addr_t afex_rdata_mapping;
/* this mutex validates that when pending flag is taken, the next /* this mutex validates that when pending flag is taken, the next
* ramrod to be sent will be the one set the pending bit * ramrod to be sent will be the one set the pending bit
*/ */
...@@ -1194,6 +1227,7 @@ union bnx2x_qable_obj { ...@@ -1194,6 +1227,7 @@ union bnx2x_qable_obj {
void bnx2x_init_func_obj(struct bnx2x *bp, void bnx2x_init_func_obj(struct bnx2x *bp,
struct bnx2x_func_sp_obj *obj, struct bnx2x_func_sp_obj *obj,
void *rdata, dma_addr_t rdata_mapping, void *rdata, dma_addr_t rdata_mapping,
void *afex_rdata, dma_addr_t afex_rdata_mapping,
struct bnx2x_func_sp_drv_ops *drv_iface); struct bnx2x_func_sp_drv_ops *drv_iface);
int bnx2x_func_state_change(struct bnx2x *bp, int bnx2x_func_state_change(struct bnx2x *bp,
......
...@@ -1561,3 +1561,274 @@ void bnx2x_save_statistics(struct bnx2x *bp) ...@@ -1561,3 +1561,274 @@ void bnx2x_save_statistics(struct bnx2x *bp)
UPDATE_FW_STAT_OLD(mac_discard); UPDATE_FW_STAT_OLD(mac_discard);
} }
} }
/**
 * bnx2x_afex_collect_stats - aggregate statistics reported to the NIC in
 *			      afex (NIV) mode.
 *
 * @bp:			driver handle
 * @void_afex_stats:	caller's output buffer, filled as a struct afex_stats
 * @stats_type:		requested statistics view; port-level (PMF) counters
 *			are folded in only for VICSTATST_UIF_INDEX
 *
 * Zeroes the output buffer, then fills it from three sources:
 *   1. per-queue L2 statistics of every ethernet queue,
 *   2. FCoE statistics, which FW collects separately (both offloaded and
 *      non-offloaded), when FCoE is enabled,
 *   3. on the PMF, port discard counters for the UIF statistics view.
 */
void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
			      u32 stats_type)
{
	int i;
	struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct per_queue_stats *fcoe_q_stats =
		&bp->fw_stats_data->queue_stats[FCOE_IDX];

	struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
		&fcoe_q_stats->tstorm_queue_statistics;

	struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
		&fcoe_q_stats->ustorm_queue_statistics;

	struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
		&fcoe_q_stats->xstorm_queue_statistics;

	struct fcoe_statistics_params *fw_fcoe_stat =
		&bp->fw_stats_data->fcoe;

	memset(afex_stats, 0, sizeof(struct afex_stats));

	/* sum up L2 statistics of all ethernet queues */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

		ADD_64(afex_stats->rx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_received_hi,
		       afex_stats->rx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_received_lo);

		ADD_64(afex_stats->rx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_received_hi,
		       afex_stats->rx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_received_lo);

		ADD_64(afex_stats->rx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_received_hi,
		       afex_stats->rx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_received_lo);

		ADD_64(afex_stats->rx_unicast_frames_hi,
		       qstats->total_unicast_packets_received_hi,
		       afex_stats->rx_unicast_frames_lo,
		       qstats->total_unicast_packets_received_lo);

		ADD_64(afex_stats->rx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_received_hi,
		       afex_stats->rx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_received_lo);

		ADD_64(afex_stats->rx_multicast_frames_hi,
		       qstats->total_multicast_packets_received_hi,
		       afex_stats->rx_multicast_frames_lo,
		       qstats->total_multicast_packets_received_lo);

		/* sum to rx_frames_discarded all discarded
		 * packets due to size, ttl0 and checksum
		 */
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_checksum_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_checksum_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->total_packets_received_ttl0_discarded_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->total_packets_received_ttl0_discarded_lo);

		ADD_64(afex_stats->rx_frames_discarded_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       afex_stats->rx_frames_discarded_lo,
		       qstats->etherstatsoverrsizepkts_lo);

		ADD_64(afex_stats->rx_frames_dropped_hi,
		       qstats->no_buff_discard_hi,
		       afex_stats->rx_frames_dropped_lo,
		       qstats->no_buff_discard_lo);

		ADD_64(afex_stats->tx_unicast_bytes_hi,
		       qstats->total_unicast_bytes_transmitted_hi,
		       afex_stats->tx_unicast_bytes_lo,
		       qstats->total_unicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_bytes_hi,
		       qstats->total_broadcast_bytes_transmitted_hi,
		       afex_stats->tx_broadcast_bytes_lo,
		       qstats->total_broadcast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_bytes_hi,
		       qstats->total_multicast_bytes_transmitted_hi,
		       afex_stats->tx_multicast_bytes_lo,
		       qstats->total_multicast_bytes_transmitted_lo);

		ADD_64(afex_stats->tx_unicast_frames_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       afex_stats->tx_unicast_frames_lo,
		       qstats->total_unicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_broadcast_frames_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       afex_stats->tx_broadcast_frames_lo,
		       qstats->total_broadcast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_multicast_frames_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       afex_stats->tx_multicast_frames_lo,
		       qstats->total_multicast_packets_transmitted_lo);

		ADD_64(afex_stats->tx_frames_dropped_hi,
		       qstats->total_transmitted_dropped_packets_error_hi,
		       afex_stats->tx_frames_dropped_lo,
		       qstats->total_transmitted_dropped_packets_error_lo);
	}

	/* now add FCoE statistics which are collected separately
	 * (both offloaded and non offloaded)
	 */
	if (!NO_FCOE(bp)) {
		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->rx_unicast_bytes_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);

		ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
			  afex_stats->rx_unicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);

		ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
			  afex_stats->rx_broadcast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
			  afex_stats->rx_multicast_bytes_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);

		ADD_64_LE(afex_stats->rx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_unicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_ucast_pkts);

		ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->rx_broadcast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_bcast_pkts);

		/* FIX: was rcv_ucast_pkts (copy-paste from the unicast case
		 * above); the multicast frame counter must accumulate the
		 * mcast packet count, matching rcv_mcast_bytes used for the
		 * multicast byte counter.
		 */
		ADD_64_LE(afex_stats->rx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->rx_multicast_frames_lo,
			  fcoe_q_tstorm_stats->rcv_mcast_pkts);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->checksum_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->pkts_too_big_discard);

		ADD_64_LE(afex_stats->rx_frames_discarded_hi,
			  LE32_0,
			  afex_stats->rx_frames_discarded_lo,
			  fcoe_q_tstorm_stats->ttl0_discard);

		/* no_buff_discard is a 16-bit FW counter */
		ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
			    LE16_0,
			    afex_stats->rx_frames_dropped_lo,
			    fcoe_q_tstorm_stats->no_buff_discard);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->ucast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->mcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fcoe_q_ustorm_stats->bcast_no_buff_pkts);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->rx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->rx_frames_dropped_lo,
			  fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  LE32_0,
			  afex_stats->tx_unicast_bytes_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);

		ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
			  afex_stats->tx_unicast_bytes_lo,
			  fcoe_q_xstorm_stats->ucast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
			  afex_stats->tx_broadcast_bytes_lo,
			  fcoe_q_xstorm_stats->bcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
			  afex_stats->tx_multicast_bytes_lo,
			  fcoe_q_xstorm_stats->mcast_bytes_sent.lo);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);

		ADD_64_LE(afex_stats->tx_unicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_unicast_frames_lo,
			  fcoe_q_xstorm_stats->ucast_pkts_sent);

		ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
			  LE32_0,
			  afex_stats->tx_broadcast_frames_lo,
			  fcoe_q_xstorm_stats->bcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_multicast_frames_hi,
			  LE32_0,
			  afex_stats->tx_multicast_frames_lo,
			  fcoe_q_xstorm_stats->mcast_pkts_sent);

		ADD_64_LE(afex_stats->tx_frames_dropped_hi,
			  LE32_0,
			  afex_stats->tx_frames_dropped_lo,
			  fcoe_q_xstorm_stats->error_drop_pkts);
	}

	/* if port stats are requested, add them to the PMF
	 * stats, as anyway they will be accumulated by the
	 * MCP before sent to the switch
	 */
	if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->mac_filter_discard);
		ADD_64(afex_stats->rx_frames_dropped_hi,
		       0,
		       afex_stats->rx_frames_dropped_lo,
		       estats->brb_truncate_discard);
		ADD_64(afex_stats->rx_frames_discarded_hi,
		       0,
		       afex_stats->rx_frames_discarded_lo,
		       estats->mac_discard);
	}
}
...@@ -338,6 +338,18 @@ struct bnx2x_fw_port_stats_old { ...@@ -338,6 +338,18 @@ struct bnx2x_fw_port_stats_old {
s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
} while (0) } while (0)
/* Little-endian zero constants, for use as the "hi" addend when a 64-bit
 * (hi/lo pair) accumulator is advanced by a 32/16-bit FW counter that has
 * no high half. The __force silences sparse: plain 0 is a valid
 * __le32/__le16 bit pattern.
 */
#define LE32_0 ((__force __le32) 0)
#define LE16_0 ((__force __le16) 0)

/* The _force is for cases where high value is 0 */
/* ADD_64 variant whose addends are little-endian FW values: convert them
 * to CPU byte order before the carry-propagating 64-bit add.
 */
#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
		ADD_64(s_hi, le32_to_cpu(a_hi_le), \
		       s_lo, le32_to_cpu(a_lo_le))

/* Same as ADD_64_LE, for 16-bit little-endian addends */
#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
		ADD_64(s_hi, le16_to_cpu(a_hi_le), \
		       s_lo, le16_to_cpu(a_lo_le))
/* difference = minuend - subtrahend */ /* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
do { \ do { \
...@@ -529,4 +541,7 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); ...@@ -529,4 +541,7 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
* @bp: driver handle * @bp: driver handle
*/ */
void bnx2x_save_statistics(struct bnx2x *bp); void bnx2x_save_statistics(struct bnx2x *bp);
void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
u32 stats_type);
#endif /* BNX2X_STATS_H */ #endif /* BNX2X_STATS_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment