Commit db0aeb31 authored by David S. Miller

Merge branch 'qed-Add-support-for-new-multi-partitioning-modes'

Sudarsana Reddy Kalluru says:

====================
qed*: Add support for new multi partitioning modes.

The patch series simplifies the multi function (MF) mode implementation of
qed/qede drivers, and adds support for new MF modes.

Please consider applying it to net-next branch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3a443bd6 cac6f691
...@@ -439,6 +439,59 @@ struct qed_fw_data { ...@@ -439,6 +439,59 @@ struct qed_fw_data {
u32 init_ops_size; u32 init_ops_size;
}; };
/* Multi-function mode capability bits, kept as a bitmap in cdev->mf_bits.
 * Individual features test these bits instead of comparing a single
 * monolithic mf_mode value.
 */
enum qed_mf_mode_bit {
	/* Supports PF-classification based on tag */
	QED_MF_OVLAN_CLSS,
	/* Supports PF-classification based on MAC */
	QED_MF_LLH_MAC_CLSS,
	/* Supports PF-classification based on protocol type */
	QED_MF_LLH_PROTO_CLSS,
	/* Requires a default PF to be set */
	QED_MF_NEED_DEF_PF,
	/* Allow LL2 to multicast/broadcast */
	QED_MF_LL2_NON_UNICAST,
	/* Allow Cross-PF [& child VFs] Tx-switching */
	QED_MF_INTER_PF_SWITCH,
	/* Unified Fabric Port support enabled */
	QED_MF_UFP_SPECIFIC,
	/* Disable Accelerated Receive Flow Steering (aRFS) */
	QED_MF_DISABLE_ARFS,
	/* Use vlan for steering */
	QED_MF_8021Q_TAGGING,
	/* Use stag for steering */
	QED_MF_8021AD_TAGGING,
	/* Allow DSCP to TC mapping */
	QED_MF_DSCP_TO_TC_MAP,
};
/* Unified Fabric Port (UFP) scheduling mode, derived from the
 * OEM_CFG_SCHED_TYPE field of the port's oem_cfg_port shmem word.
 */
enum qed_ufp_mode {
	QED_UFP_MODE_ETS,	/* ETS-based scheduling */
	QED_UFP_MODE_VNIC_BW,	/* per-vNIC bandwidth scheduling */
	QED_UFP_MODE_UNKNOWN	/* unrecognized value read from shmem */
};
/* Which entity controls the UFP priority, derived from the
 * OEM_CFG_FUNC_HOST_PRI_CTRL field of the function's oem_cfg_func word.
 */
enum qed_ufp_pri_type {
	QED_UFP_PRI_OS,		/* priority controlled by the host OS */
	QED_UFP_PRI_VNIC,	/* priority controlled by the vNIC */
	QED_UFP_PRI_UNKNOWN	/* unrecognized value read from shmem */
};
/* Per-function UFP configuration cached from shared memory by
 * qed_mcp_read_ufp_config().
 */
struct qed_ufp_info {
	enum qed_ufp_pri_type pri_type;	/* host-priority control owner */
	enum qed_ufp_mode mode;		/* scheduling mode (ETS / vNIC BW) */
	u8 tc;				/* traffic class assigned to this PF */
};
enum BAR_ID { enum BAR_ID {
BAR_ID_0, /* used for GRC */ BAR_ID_0, /* used for GRC */
BAR_ID_1 /* Used for doorbells */ BAR_ID_1 /* Used for doorbells */
...@@ -547,6 +600,8 @@ struct qed_hwfn { ...@@ -547,6 +600,8 @@ struct qed_hwfn {
struct qed_dcbx_info *p_dcbx_info; struct qed_dcbx_info *p_dcbx_info;
struct qed_ufp_info ufp_info;
struct qed_dmae_info dmae_info; struct qed_dmae_info dmae_info;
/* QM init */ /* QM init */
...@@ -669,10 +724,8 @@ struct qed_dev { ...@@ -669,10 +724,8 @@ struct qed_dev {
u8 num_funcs_in_port; u8 num_funcs_in_port;
u8 path_id; u8 path_id;
enum qed_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT) unsigned long mf_bits;
#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
int pcie_width; int pcie_width;
int pcie_speed; int pcie_speed;
......
...@@ -274,8 +274,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, ...@@ -274,8 +274,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
u32 pri_tc_tbl, int count, u8 dcbx_version) u32 pri_tc_tbl, int count, u8 dcbx_version)
{ {
enum dcbx_protocol_type type; enum dcbx_protocol_type type;
bool enable, ieee, eth_tlv;
u8 tc, priority_map; u8 tc, priority_map;
bool enable, ieee;
u16 protocol_id; u16 protocol_id;
int priority; int priority;
int i; int i;
...@@ -283,6 +283,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, ...@@ -283,6 +283,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
eth_tlv = false;
/* Parse APP TLV */ /* Parse APP TLV */
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
...@@ -304,13 +305,22 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, ...@@ -304,13 +305,22 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
* indication, but we only got here if there was an * indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled. * app tlv for the protocol, so dcbx must be enabled.
*/ */
enable = !(type == DCBX_PROTOCOL_ETH); if (type == DCBX_PROTOCOL_ETH) {
enable = false;
eth_tlv = true;
} else {
enable = true;
}
qed_dcbx_update_app_info(p_data, p_hwfn, enable, qed_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type); priority, tc, type);
} }
} }
/* If Eth TLV is not detected, use UFP TC as default TC */
if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv)
p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
/* Update ramrod protocol data and hw_info fields /* Update ramrod protocol data and hw_info fields
* with default info when corresponding APP TLV's are not detected. * with default info when corresponding APP TLV's are not detected.
* The enabled field has a different logic for ethernet as only for * The enabled field has a different logic for ethernet as only for
......
...@@ -1149,18 +1149,10 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) ...@@ -1149,18 +1149,10 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
return -EINVAL; return -EINVAL;
} }
switch (p_hwfn->cdev->mf_mode) { if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
case QED_MF_DEFAULT:
case QED_MF_NPAR:
hw_mode |= 1 << MODE_MF_SI;
break;
case QED_MF_OVLAN:
hw_mode |= 1 << MODE_MF_SD; hw_mode |= 1 << MODE_MF_SD;
break; else
default:
DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
hw_mode |= 1 << MODE_MF_SI; hw_mode |= 1 << MODE_MF_SI;
}
hw_mode |= 1 << MODE_ASIC; hw_mode |= 1 << MODE_ASIC;
...@@ -1507,6 +1499,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, ...@@ -1507,6 +1499,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
p_hwfn->hw_info.ovlan); p_hwfn->hw_info.ovlan);
DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
"Configuring LLH_FUNC_FILTER_HDR_SEL\n");
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
1);
} }
/* Enable classification by MAC if needed */ /* Enable classification by MAC if needed */
...@@ -1557,7 +1554,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, ...@@ -1557,7 +1554,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* send function start command */ /* send function start command */
rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
p_hwfn->cdev->mf_mode,
allow_npar_tx_switch); allow_npar_tx_switch);
if (rc) { if (rc) {
DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
...@@ -1644,6 +1640,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) ...@@ -1644,6 +1640,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
bool b_default_mtu = true; bool b_default_mtu = true;
struct qed_hwfn *p_hwfn; struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i; int rc = 0, mfw_rc, i;
u16 ether_type;
if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
...@@ -1677,6 +1674,24 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) ...@@ -1677,6 +1674,24 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (rc) if (rc)
return rc; return rc;
if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING,
&cdev->mf_bits) ||
test_bit(QED_MF_8021AD_TAGGING,
&cdev->mf_bits))) {
if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits))
ether_type = ETH_P_8021Q;
else
ether_type = ETH_P_8021AD;
STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
ether_type);
STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
ether_type);
STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
ether_type);
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
ether_type);
}
qed_fill_load_req_params(&load_req_params, qed_fill_load_req_params(&load_req_params,
p_params->p_drv_load_params); p_params->p_drv_load_params);
rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
...@@ -2639,31 +2654,57 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -2639,31 +2654,57 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
link->pause.autoneg, link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer); p_caps->default_eee, p_caps->eee_lpi_timer);
/* Read Multi-function information from shmem */ if (IS_LEAD_HWFN(p_hwfn)) {
addr = MCP_REG_SCRATCH + nvm_cfg1_offset + struct qed_dev *cdev = p_hwfn->cdev;
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, generic_cont0);
generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); /* Read Multi-function information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, generic_cont0);
mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
NVM_CFG1_GLOB_MF_MODE_OFFSET;
switch (mf_mode) { mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: NVM_CFG1_GLOB_MF_MODE_OFFSET;
p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
break; switch (mf_mode) {
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
p_hwfn->cdev->mf_mode = QED_MF_NPAR; cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
break; break;
case NVM_CFG1_GLOB_MF_MODE_DEFAULT: case NVM_CFG1_GLOB_MF_MODE_UFP:
p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
break; BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_UFP_SPECIFIC) |
BIT(QED_MF_8021Q_TAGGING);
break;
case NVM_CFG1_GLOB_MF_MODE_BD:
cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_8021AD_TAGGING);
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_LL2_NON_UNICAST) |
BIT(QED_MF_INTER_PF_SWITCH);
break;
case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_LL2_NON_UNICAST);
if (QED_IS_BB(p_hwfn->cdev))
cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
break;
}
DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
cdev->mf_bits);
} }
DP_INFO(p_hwfn, "Multi function mode is %08x\n",
p_hwfn->cdev->mf_mode);
/* Read Multi-function information from shmem */ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
p_hwfn->cdev->mf_bits);
/* Read device capabilities information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset + addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, device_capabilities); offsetof(struct nvm_cfg1_glob, device_capabilities);
...@@ -2856,6 +2897,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, ...@@ -2856,6 +2897,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
qed_mcp_cmd_port_init(p_hwfn, p_ptt); qed_mcp_cmd_port_init(p_hwfn, p_ptt);
qed_get_eee_caps(p_hwfn, p_ptt); qed_get_eee_caps(p_hwfn, p_ptt);
qed_mcp_read_ufp_config(p_hwfn, p_ptt);
} }
if (qed_mcp_is_init(p_hwfn)) { if (qed_mcp_is_init(p_hwfn)) {
...@@ -3462,7 +3505,7 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, ...@@ -3462,7 +3505,7 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0, en; u32 high = 0, low = 0, en;
int i; int i;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
return 0; return 0;
qed_llh_mac_to_filter(&high, &low, p_filter); qed_llh_mac_to_filter(&high, &low, p_filter);
...@@ -3507,7 +3550,7 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, ...@@ -3507,7 +3550,7 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0; u32 high = 0, low = 0;
int i; int i;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits))
return; return;
qed_llh_mac_to_filter(&high, &low, p_filter); qed_llh_mac_to_filter(&high, &low, p_filter);
...@@ -3549,7 +3592,7 @@ qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, ...@@ -3549,7 +3592,7 @@ qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0, en; u32 high = 0, low = 0, en;
int i; int i;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
return 0; return 0;
switch (type) { switch (type) {
...@@ -3647,7 +3690,7 @@ qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, ...@@ -3647,7 +3690,7 @@ qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
u32 high = 0, low = 0; u32 high = 0, low = 0;
int i; int i;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits))
return; return;
switch (type) { switch (type) {
......
...@@ -313,6 +313,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, ...@@ -313,6 +313,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
p_data->d_id.addr_mid = p_conn->d_id.addr_mid; p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
p_data->d_id.addr_lo = p_conn->d_id.addr_lo; p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
p_data->flags = p_conn->flags; p_data->flags = p_conn->flags;
if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
SET_FIELD(p_data->flags,
FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
p_data->def_q_idx = p_conn->def_q_idx; p_data->def_q_idx = p_conn->def_q_idx;
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
......
...@@ -11993,6 +11993,16 @@ struct public_port { ...@@ -11993,6 +11993,16 @@ struct public_port {
#define EEE_REMOTE_TW_TX_OFFSET 0 #define EEE_REMOTE_TW_TX_OFFSET 0
#define EEE_REMOTE_TW_RX_MASK 0xffff0000 #define EEE_REMOTE_TW_RX_MASK 0xffff0000
#define EEE_REMOTE_TW_RX_OFFSET 16 #define EEE_REMOTE_TW_RX_OFFSET 16
u32 oem_cfg_port;
#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
#define OEM_CFG_SCHED_TYPE_OFFSET 2
#define OEM_CFG_SCHED_TYPE_ETS 0x1
#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
}; };
struct public_func { struct public_func {
...@@ -12069,6 +12079,23 @@ struct public_func { ...@@ -12069,6 +12079,23 @@ struct public_func {
#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 #define DRV_ID_DRV_INIT_HW_MASK 0x80000000
#define DRV_ID_DRV_INIT_HW_SHIFT 31 #define DRV_ID_DRV_INIT_HW_SHIFT 31
#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) #define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
u32 oem_cfg_func;
#define OEM_CFG_FUNC_TC_MASK 0x0000000F
#define OEM_CFG_FUNC_TC_OFFSET 0
#define OEM_CFG_FUNC_TC_0 0x0
#define OEM_CFG_FUNC_TC_1 0x1
#define OEM_CFG_FUNC_TC_2 0x2
#define OEM_CFG_FUNC_TC_3 0x3
#define OEM_CFG_FUNC_TC_4 0x4
#define OEM_CFG_FUNC_TC_5 0x5
#define OEM_CFG_FUNC_TC_6 0x6
#define OEM_CFG_FUNC_TC_7 0x7
#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
}; };
struct mcp_mac { struct mcp_mac {
...@@ -12495,6 +12522,7 @@ enum MFW_DRV_MSG_TYPE { ...@@ -12495,6 +12522,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_BW_UPDATE10, MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11, MFW_DRV_MSG_BW_UPDATE11,
MFW_DRV_MSG_OEM_CFG_UPDATE,
MFW_DRV_MSG_MAX MFW_DRV_MSG_MAX
}; };
......
...@@ -919,12 +919,16 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, ...@@ -919,12 +919,16 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_stripping_en = p_ramrod->inner_vlan_stripping_en =
p_ll2_conn->input.rx_vlan_removal_en; p_ll2_conn->input.rx_vlan_removal_en;
if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
p_ramrod->report_outer_vlan = 1;
p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) && p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
(conn_type != QED_LL2_TYPE_IWARP)) { conn_type != QED_LL2_TYPE_IWARP) {
p_ramrod->mf_si_bcast_accept_all = 1; p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1; p_ramrod->mf_si_mcast_accept_all = 1;
} else { } else {
...@@ -1493,11 +1497,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) ...@@ -1493,11 +1497,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
qed_llh_add_protocol_filter(p_hwfn, p_ptt,
ETH_P_FCOE, 0,
QED_LLH_FILTER_ETHERTYPE);
qed_llh_add_protocol_filter(p_hwfn, p_ptt, qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8906, 0, ETH_P_FIP, 0,
QED_LLH_FILTER_ETHERTYPE);
qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE); QED_LLH_FILTER_ETHERTYPE);
} }
...@@ -1653,11 +1658,16 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, ...@@ -1653,11 +1658,16 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
if (QED_IS_IWARP_PERSONALITY(p_hwfn) && if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
p_ll2->input.conn_type == QED_LL2_TYPE_OOO) p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
start_bd->nw_vlan_or_lb_echo = start_bd->nw_vlan_or_lb_echo =
cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
else } else {
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
pkt->remove_stag = true;
}
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(pkt->l4_hdr_offset_w)); cpu_to_le16(pkt->l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
...@@ -1668,6 +1678,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, ...@@ -1668,6 +1678,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
!!(pkt->remove_stag));
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
...@@ -1884,11 +1897,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) ...@@ -1884,11 +1897,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
ETH_P_FCOE, 0,
QED_LLH_FILTER_ETHERTYPE);
qed_llh_remove_protocol_filter(p_hwfn, p_ptt, qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8906, 0, ETH_P_FIP, 0,
QED_LLH_FILTER_ETHERTYPE);
qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE); QED_LLH_FILTER_ETHERTYPE);
} }
...@@ -2360,7 +2374,8 @@ static int qed_ll2_stop(struct qed_dev *cdev) ...@@ -2360,7 +2374,8 @@ static int qed_ll2_stop(struct qed_dev *cdev)
return -EINVAL; return -EINVAL;
} }
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags)
{ {
struct qed_ll2_tx_pkt_info pkt; struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag; const skb_frag_t *frag;
...@@ -2405,6 +2420,9 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) ...@@ -2405,6 +2420,9 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
pkt.first_frag = mapping; pkt.first_frag = mapping;
pkt.first_frag_len = skb->len; pkt.first_frag_len = skb->len;
pkt.cookie = skb; pkt.cookie = skb;
if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
pkt.remove_stag = true;
rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
&pkt, 1); &pkt, 1);
......
...@@ -264,7 +264,6 @@ int qed_fill_dev_info(struct qed_dev *cdev, ...@@ -264,7 +264,6 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq; dev_info->pci_irq = cdev->pci_params.irq;
dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
dev_info->dev_type = cdev->type; dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
...@@ -273,7 +272,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, ...@@ -273,7 +272,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_minor = FW_MINOR_VERSION; dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION; dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode; dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
&cdev->mf_bits);
dev_info->tx_switching = true; dev_info->tx_switching = true;
if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include "qed.h" #include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h" #include "qed_dcbx.h"
#include "qed_hsi.h" #include "qed_hsi.h"
#include "qed_hw.h" #include "qed_hw.h"
...@@ -1486,6 +1487,80 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1486,6 +1487,80 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
&resp, &param); &resp, &param);
} }
/* Read the Unified Fabric Port (UFP) configuration that management FW
 * publishes in shared memory and cache it in p_hwfn->ufp_info.
 *
 * Port-wide settings (channel type, scheduling mode) come from the
 * public_port oem_cfg_port word; per-function settings (TC, host
 * priority control) come from the public_func oem_cfg_func word.
 * No-op unless the device operates in UFP multi-function mode.
 */
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		return;

	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			  offsetof(struct public_port, oem_cfg_port));
	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
	      OEM_CFG_CHANNEL_TYPE_OFFSET;
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);

	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
	if (val == OEM_CFG_SCHED_TYPE_ETS) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
	} else {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
		DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
	}

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	/* Bug fix: the function-scoped fields live in oem_cfg_func, not in
	 * the port-scoped oem_cfg_port word read above. Masking port_cfg
	 * with OEM_CFG_FUNC_TC_MASK (0xF) would pick up the port's
	 * channel/sched-type bits instead of the PF's traffic class.
	 */
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
	      OEM_CFG_FUNC_TC_OFFSET;
	p_hwfn->ufp_info.tc = (u8)val;
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
	      OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
	} else {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
		DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
	}

	DP_NOTICE(p_hwfn,
		  "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
		  p_hwfn->ufp_info.mode,
		  p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
}
/* Handle an MFW_DRV_MSG_OEM_CFG_UPDATE notification: re-read the UFP
 * shmem configuration and propagate it to the QM / DCBX / storm FW.
 *
 * Returns 0 on success, -EINVAL when the advertised scheduling mode
 * is unrecognized (configuration is discarded).
 */
static int
qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_mcp_read_ufp_config(p_hwfn, p_ptt);

	switch (p_hwfn->ufp_info.mode) {
	case QED_UFP_MODE_VNIC_BW:
		/* The negotiated TC drives both the OOO and offload
		 * queues; reconfigure the queue manager accordingly.
		 */
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
		qed_qm_reconf(p_hwfn, p_ptt);
		break;
	case QED_UFP_MODE_ETS:
		/* Merge UFP TC with the dcbx TC data */
		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					  QED_DCBX_OPERATIONAL_MIB);
		break;
	default:
		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
		return -EINVAL;
	}

	/* update storm FW with negotiation results */
	qed_sp_pf_update_ufp(p_hwfn);

	/* update stag pcp value */
	qed_sp_pf_update_stag(p_hwfn);

	return 0;
}
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) struct qed_ptt *p_ptt)
{ {
...@@ -1529,6 +1604,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, ...@@ -1529,6 +1604,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
qed_dcbx_mib_update_event(p_hwfn, p_ptt, qed_dcbx_mib_update_event(p_hwfn, p_ptt,
QED_DCBX_OPERATIONAL_MIB); QED_DCBX_OPERATIONAL_MIB);
break; break;
case MFW_DRV_MSG_OEM_CFG_UPDATE:
qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break; break;
......
...@@ -1004,6 +1004,14 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); ...@@ -1004,6 +1004,14 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/ */
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Read ufp config from the shared memory.
*
* @param p_hwfn
* @param p_ptt
*/
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/** /**
* @brief Populate the nvm info shadow in the given hardware function * @brief Populate the nvm info shadow in the given hardware function
* *
......
...@@ -416,7 +416,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, ...@@ -416,7 +416,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* @param p_hwfn * @param p_hwfn
* @param p_ptt * @param p_ptt
* @param p_tunn * @param p_tunn
* @param mode
* @param allow_npar_tx_switch * @param allow_npar_tx_switch
* *
* @return int * @return int
...@@ -425,7 +424,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, ...@@ -425,7 +424,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn, struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch); bool allow_npar_tx_switch);
/** /**
* @brief qed_sp_pf_update - PF Function Update Ramrod * @brief qed_sp_pf_update - PF Function Update Ramrod
...@@ -463,6 +462,15 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); ...@@ -463,6 +462,15 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
* @return int * @return int
*/ */
/**
* @brief qed_sp_pf_update_ufp - PF ufp update Ramrod
*
* @param p_hwfn
*
* @return int
*/
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
......
...@@ -306,7 +306,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, ...@@ -306,7 +306,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
struct qed_tunnel_info *p_tunn, struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch) bool allow_npar_tx_switch)
{ {
struct pf_start_ramrod_data *p_ramrod = NULL; struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn); u16 sb = qed_int_get_sp_sb_id(p_hwfn);
...@@ -314,7 +314,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -314,7 +314,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data; struct qed_sp_init_data init_data;
int rc = -EINVAL; int rc = -EINVAL;
u8 page_cnt; u8 page_cnt, i;
/* update initial eq producer */ /* update initial eq producer */
qed_eq_prod_update(p_hwfn, qed_eq_prod_update(p_hwfn,
...@@ -339,21 +339,36 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -339,21 +339,36 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->dont_log_ramrods = 0; p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf); p_ramrod->log_type_mask = cpu_to_le16(0xf);
switch (mode) { if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
case QED_MF_DEFAULT:
case QED_MF_NPAR:
p_ramrod->mf_mode = MF_NPAR;
break;
case QED_MF_OVLAN:
p_ramrod->mf_mode = MF_OVLAN; p_ramrod->mf_mode = MF_OVLAN;
break; else
default:
DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
p_ramrod->mf_mode = MF_NPAR; p_ramrod->mf_mode = MF_NPAR;
}
p_ramrod->outer_tag_config.outer_tag.tci = p_ramrod->outer_tag_config.outer_tag.tci =
cpu_to_le16(p_hwfn->hw_info.ovlan); cpu_to_le16(p_hwfn->hw_info.ovlan);
if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
} else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
}
p_ramrod->outer_tag_config.pri_map_valid = 1;
for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
/* enable_stag_pri_change should be set if port is in BD mode or,
* UFP with Host Control mode.
*/
if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
else
p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
p_ramrod->outer_tag_config.outer_tag.tci |=
cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
}
/* Place EQ address in RAMROD */ /* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
...@@ -365,7 +380,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -365,7 +380,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn)) if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
switch (p_hwfn->hw_info.personality) { switch (p_hwfn->hw_info.personality) {
...@@ -434,6 +449,39 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn) ...@@ -434,6 +449,39 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
return qed_spq_post(p_hwfn, p_ent, NULL); return qed_spq_post(p_hwfn, p_ent, NULL);
} }
/* qed_sp_pf_update_ufp() - Post a PF_UPDATE ramrod that applies the
 * current UFP (Unified Fabric Port) S-tag priority-change setting.
 *
 * @p_hwfn: HW function whose ufp_info configuration is sent to firmware.
 *
 * Return: 0 on success, -EINVAL when the UFP priority type is still
 *         unresolved, or the error code from slow-path request
 *         initialization / posting.
 */
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent = NULL;
	int rc;

	/* Refuse to program the firmware with an unknown priority type */
	if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
		DP_INFO(p_hwfn, "Invalid priority type %d\n",
			p_hwfn->ufp_info.pri_type);
		return -EINVAL;
	}

	/* Prepare a callback-mode slow-path queue entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_CB;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				 &init_data);
	if (rc)
		return rc;

	/* S-tag priority change is enabled only when the host OS owns the
	 * priority (QED_UFP_PRI_OS); otherwise it is explicitly disabled.
	 */
	p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
	p_ent->ramrod.pf_update.enable_stag_pri_change =
	    (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) ? 1 : 0;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
/* Set pf update ramrod command params */ /* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
......
...@@ -199,7 +199,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) ...@@ -199,7 +199,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
/* Enable/Disable Tx switching for PF */ /* Enable/Disable Tx switching for PF */
if ((rc == num_vfs_param) && netif_running(edev->ndev) && if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
vport_params->vport_id = 0; vport_params->vport_id = 0;
vport_params->update_tx_switching_flg = 1; vport_params->update_tx_switching_flg = 1;
vport_params->tx_switching_flg = num_vfs_param ? 1 : 0; vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
...@@ -1928,7 +1928,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) ...@@ -1928,7 +1928,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
vport_update_params->update_vport_active_flg = 1; vport_update_params->update_vport_active_flg = 1;
vport_update_params->vport_active_flg = 1; vport_update_params->vport_active_flg = 1;
if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
qed_info->tx_switching) { qed_info->tx_switching) {
vport_update_params->update_tx_switching_flg = 1; vport_update_params->update_tx_switching_flg = 1;
vport_update_params->tx_switching_flg = 1; vport_update_params->tx_switching_flg = 1;
......
...@@ -23,6 +23,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) ...@@ -23,6 +23,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
struct fip_vlan *vlan; struct fip_vlan *vlan;
#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) #define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
unsigned long flags = 0;
skb = dev_alloc_skb(sizeof(struct fip_vlan)); skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb) if (!skb)
...@@ -65,7 +66,9 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) ...@@ -65,7 +66,9 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
kfree_skb(skb); kfree_skb(skb);
return; return;
} }
qed_ops->ll2->start_xmit(qedf->cdev, skb);
set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags);
qed_ops->ll2->start_xmit(qedf->cdev, skb, flags);
} }
static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
...@@ -139,7 +142,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) ...@@ -139,7 +142,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, skb->len, false); skb->data, skb->len, false);
qed_ops->ll2->start_xmit(qedf->cdev, skb); qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
} }
/* Process incoming FIP frames. */ /* Process incoming FIP frames. */
......
...@@ -994,7 +994,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) ...@@ -994,7 +994,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
if (qedf_dump_frames) if (qedf_dump_frames)
print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
1, skb->data, skb->len, false); 1, skb->data, skb->len, false);
qed_ops->ll2->start_xmit(qedf->cdev, skb); qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
return 0; return 0;
} }
......
...@@ -1150,7 +1150,7 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) ...@@ -1150,7 +1150,7 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
if (vlanid) if (vlanid)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
rc = qedi_ops->ll2->start_xmit(cdev, skb); rc = qedi_ops->ll2->start_xmit(cdev, skb, 0);
if (rc) { if (rc) {
QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n",
rc); rc);
......
...@@ -339,7 +339,6 @@ struct qed_dev_info { ...@@ -339,7 +339,6 @@ struct qed_dev_info {
u8 num_hwfns; u8 num_hwfns;
u8 hw_mac[ETH_ALEN]; u8 hw_mac[ETH_ALEN];
bool is_mf_default;
/* FW version */ /* FW version */
u16 fw_major; u16 fw_major;
...@@ -359,7 +358,7 @@ struct qed_dev_info { ...@@ -359,7 +358,7 @@ struct qed_dev_info {
#define QED_MFW_VERSION_3_OFFSET 24 #define QED_MFW_VERSION_3_OFFSET 24
u32 flash_size; u32 flash_size;
u8 mf_mode; bool b_inter_pf_switch;
bool tx_switching; bool tx_switching;
bool rdma_supported; bool rdma_supported;
u16 mtu; u16 mtu;
......
...@@ -202,6 +202,7 @@ struct qed_ll2_tx_pkt_info { ...@@ -202,6 +202,7 @@ struct qed_ll2_tx_pkt_info {
bool enable_ip_cksum; bool enable_ip_cksum;
bool enable_l4_cksum; bool enable_l4_cksum;
bool calc_ip_len; bool calc_ip_len;
bool remove_stag;
}; };
#define QED_LL2_UNUSED_HANDLE (0xff) #define QED_LL2_UNUSED_HANDLE (0xff)
...@@ -220,6 +221,11 @@ struct qed_ll2_params { ...@@ -220,6 +221,11 @@ struct qed_ll2_params {
u8 ll2_mac_address[ETH_ALEN]; u8 ll2_mac_address[ETH_ALEN];
}; };
/* Per-packet transmit options for qed_ll2_ops::start_xmit(); each value is
 * a bit index used with set_bit()/test_bit() on the xmit_flags bitmask.
 */
enum qed_ll2_xmit_flags {
	/* FIP discovery packet */
	QED_LL2_XMIT_FLAGS_FIP_DISCOVERY
};
struct qed_ll2_ops { struct qed_ll2_ops {
/** /**
* @brief start - initializes ll2 * @brief start - initializes ll2
...@@ -245,10 +251,12 @@ struct qed_ll2_ops { ...@@ -245,10 +251,12 @@ struct qed_ll2_ops {
* *
* @param cdev * @param cdev
* @param skb * @param skb
* @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
* *
* @return 0 on success, otherwise error value. * @return 0 on success, otherwise error value.
*/ */
int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb); int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags);
/** /**
* @brief register_cb_ops - protocol driver register the callback for Rx/Tx * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment