Commit 993feee9 authored by David S. Miller

Merge branch 'qed-tunneling-offload'

Manish Chopra says:

====================
qed/qede: Add tunneling support

This patch series adds support for VXLAN, GRE and GENEVE tunnels
to this driver. With this support, the adapter can perform TSO
offload and inner/outer checksum offloads on TX and RX for
encapsulated packets.

V1->V2 [ Comments from Jesse Gross incorporated ]
* Drop the general infrastructure change patch
  "net: Make vxlan/geneve default udp ports public".
* Remove the default Linux UDP port configurations from the driver.
  Instead, use the generic registration APIs for UDP port configuration
  (a minimal sketch of this pattern follows the commit message).
* Remove .ndo_features_check - we will add it later with a proper change.

Please consider applying this series to net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ee1c2797 14db81de
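A note on the registration approach mentioned in the V1->V2 changes: rather than
programming Linux's default VXLAN/GENEVE UDP ports into the device, the driver
learns the ports from the stack via the .ndo_add_vxlan_port/.ndo_add_geneve_port
callbacks (the mechanism of that kernel era) and replays already-open sockets
with vxlan_get_rx_port()/geneve_get_rx_port() at open time, as the diff below
shows. The following is only a minimal sketch of that pattern, not the actual
qede code; names such as foo_dev, foo_add_vxlan_port, foo_netdev_ops and
FOO_SP_VXLAN_PORT_CONFIG are placeholders.

#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <net/vxlan.h>

struct foo_dev {
	struct delayed_work sp_task;	/* slow-path worker */
	unsigned long sp_flags;
	u16 vxlan_dst_port;		/* UDP port currently offloaded */
};

#define FOO_SP_VXLAN_PORT_CONFIG 1

/* Called by the core whenever a VXLAN socket is opened. The hardware
 * configuration itself is deferred to the slow-path worker, which would
 * then push the port to the device/firmware.
 */
static void foo_add_vxlan_port(struct net_device *dev,
			       sa_family_t sa_family, __be16 port)
{
	struct foo_dev *fdev = netdev_priv(dev);

	if (fdev->vxlan_dst_port)	/* one offloaded port at a time */
		return;

	fdev->vxlan_dst_port = ntohs(port);
	set_bit(FOO_SP_VXLAN_PORT_CONFIG, &fdev->sp_flags);
	schedule_delayed_work(&fdev->sp_task, 0);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_add_vxlan_port	= foo_add_vxlan_port,
	/* .ndo_del_vxlan_port and the GENEVE variants follow the same shape */
};

/* In ndo_open, vxlan_get_rx_port(ndev) asks the VXLAN core to replay
 * .ndo_add_vxlan_port for every port opened before the device came up.
 */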
@@ -103,4 +103,25 @@ config QEDE
depends on QED
---help---
This enables the support for ...
config QEDE_VXLAN
bool "Virtual eXtensible Local Area Network support"
default n
depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
---help---
This enables hardware offload support for VXLAN protocol over
qede module. Say Y here if you want to enable hardware offload
support for Virtual eXtensible Local Area Network (VXLAN)
in the driver.
config QEDE_GENEVE
bool "Generic Network Virtualization Encapsulation (GENEVE) support"
depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
---help---
This allows one to create GENEVE virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. GENEVE is often used
to tunnel virtual network infrastructure in virtualized environments.
Say Y here if you want to enable hardware offload support for
Generic Network Virtualization Encapsulation (GENEVE) in the driver.
endif # NET_VENDOR_QLOGIC
@@ -74,6 +74,51 @@ struct qed_rt_data {
bool *b_valid;
};
enum qed_tunn_mode {
QED_MODE_L2GENEVE_TUNN,
QED_MODE_IPGENEVE_TUNN,
QED_MODE_L2GRE_TUNN,
QED_MODE_IPGRE_TUNN,
QED_MODE_VXLAN_TUNN,
};
enum qed_tunn_clss {
QED_TUNN_CLSS_MAC_VLAN,
QED_TUNN_CLSS_MAC_VNI,
QED_TUNN_CLSS_INNER_MAC_VLAN,
QED_TUNN_CLSS_INNER_MAC_VNI,
MAX_QED_TUNN_CLSS,
};
struct qed_tunn_start_params {
unsigned long tunn_mode;
u16 vxlan_udp_port;
u16 geneve_udp_port;
u8 update_vxlan_udp_port;
u8 update_geneve_udp_port;
u8 tunn_clss_vxlan;
u8 tunn_clss_l2geneve;
u8 tunn_clss_ipgeneve;
u8 tunn_clss_l2gre;
u8 tunn_clss_ipgre;
};
struct qed_tunn_update_params {
unsigned long tunn_mode_update_mask;
unsigned long tunn_mode;
u16 vxlan_udp_port;
u16 geneve_udp_port;
u8 update_rx_pf_clss;
u8 update_tx_pf_clss;
u8 update_vxlan_udp_port;
u8 update_geneve_udp_port;
u8 tunn_clss_vxlan;
u8 tunn_clss_l2geneve;
u8 tunn_clss_ipgeneve;
u8 tunn_clss_l2gre;
u8 tunn_clss_ipgre;
};
/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may support also the RoCE protocol
@@ -430,6 +475,7 @@ struct qed_dev {
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
unsigned long tunn_mode;
u32 drv_type;
struct qed_eth_stats *reset_stats;
...
@@ -558,6 +558,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_tunn_start_params *p_tunn,
int hw_mode,
bool b_hw_start,
enum qed_int_mode int_mode,
@@ -625,7 +626,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
- rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
if (rc)
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
}
@@ -672,6 +673,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
}
int qed_hw_init(struct qed_dev *cdev,
struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -724,7 +726,7 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_hwfn->hw_info.hw_mode,
p_tunn, p_hwfn->hw_info.hw_mode,
b_hw_start, int_mode,
allow_npar_tx_switch);
break;
...
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @brief qed_hw_init -
*
* @param cdev
* @param p_tunn
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @return int
*/
int qed_hw_init(struct qed_dev *cdev,
struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
...
@@ -46,7 +46,7 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
COMMON_RAMROD_RESERVED,
COMMON_RAMROD_RESERVED2,
- COMMON_RAMROD_RESERVED3,
COMMON_RAMROD_PF_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -626,6 +626,42 @@ struct pf_start_ramrod_data {
u8 reserved0[4];
};
/* tunnel configuration */
struct pf_update_tunnel_config {
u8 update_rx_pf_clss;
u8 update_tx_pf_clss;
u8 set_vxlan_udp_port_flg;
u8 set_geneve_udp_port_flg;
u8 tx_enable_vxlan;
u8 tx_enable_l2geneve;
u8 tx_enable_ipgeneve;
u8 tx_enable_l2gre;
u8 tx_enable_ipgre;
u8 tunnel_clss_vxlan;
u8 tunnel_clss_l2geneve;
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre;
u8 tunnel_clss_ipgre;
__le16 vxlan_udp_port;
__le16 geneve_udp_port;
__le16 reserved[3];
};
struct pf_update_ramrod_data {
u32 reserved[2];
u32 reserved_1[6];
struct pf_update_tunnel_config tunnel_config;
};
/* Tunnel classification scheme */
enum tunnel_clss {
TUNNEL_CLSS_MAC_VLAN = 0,
TUNNEL_CLSS_MAC_VNI,
TUNNEL_CLSS_INNER_MAC_VLAN,
TUNNEL_CLSS_INNER_MAC_VNI,
MAX_TUNNEL_CLSS
};
enum ports_mode {
ENGX2_PORTX1 /* 2 engines x 1 port */,
ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -1603,6 +1639,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
u16 start_pq,
u16 num_pqs);
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable);
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool eth_gre_enable,
bool ip_gre_enable);
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool eth_geneve_enable,
bool ip_geneve_enable);
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
...
@@ -788,3 +788,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
if (enable)
set_bit(bit, var);
else
clear_bit(bit, var);
}
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 dest_port)
{
qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool vxlan_enable)
{
unsigned long reg_val = 0;
u8 shift;
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
PRS_ETH_TUNN_FIC_FORMAT);
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
vxlan_enable ? 1 : 0);
}
void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable)
{
unsigned long reg_val = 0;
u8 shift;
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
PRS_ETH_TUNN_FIC_FORMAT);
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
eth_gre_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
ip_gre_enable ? 1 : 0);
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 dest_port)
{
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable,
bool ip_geneve_enable)
{
unsigned long reg_val = 0;
u8 shift;
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val)
qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
PRS_ETH_TUNN_FIC_FORMAT);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
/* comp ver */
reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
/* EDPM with geneve tunnel not supported in BB_B0 */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
ip_geneve_enable ? 1 : 0);
}
@@ -1884,6 +1884,36 @@ static int qed_stop_txq(struct qed_dev *cdev,
return 0;
}
static int qed_tunn_configure(struct qed_dev *cdev,
struct qed_tunn_params *tunn_params)
{
struct qed_tunn_update_params tunn_info;
int i, rc;
memset(&tunn_info, 0, sizeof(tunn_info));
if (tunn_params->update_vxlan_port == 1) {
tunn_info.update_vxlan_udp_port = 1;
tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
}
if (tunn_params->update_geneve_port == 1) {
tunn_info.update_geneve_udp_port = 1;
tunn_info.geneve_udp_port = tunn_params->geneve_port;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc)
return rc;
}
return 0;
}
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
enum qed_filter_rx_mode_type type)
{
@@ -2026,6 +2056,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.fastpath_stop = &qed_fastpath_stop,
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
.tunn_config = &qed_tunn_configure,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
...
@@ -744,6 +744,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
struct qed_tunn_start_params tunn_info;
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
@@ -776,7 +777,19 @@ static int qed_slowpath_start(struct qed_dev *cdev,
/* Start the slowpath */
data = cdev->firmware->data;
- rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
memset(&tunn_info, 0, sizeof(tunn_info));
tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
1 << QED_MODE_L2GRE_TUNN |
1 << QED_MODE_IPGRE_TUNN |
1 << QED_MODE_L2GENEVE_TUNN |
1 << QED_MODE_IPGENEVE_TUNN;
tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
rc = qed_hw_init(cdev, &tunn_info, true,
cdev->int_params.out.int_mode,
true, data);
if (rc)
goto err2;
...
@@ -427,4 +427,35 @@
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
0x2aae64UL
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1)
#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
#define NIG_REG_VXLAN_PORT 0x50105cUL
#define PBF_REG_VXLAN_PORT 0xd80518UL
#define PBF_REG_NGE_PORT 0xd8051cUL
#define PRS_REG_NGE_PORT 0x1f086cUL
#define NIG_REG_NGE_PORT 0x508b38UL
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
#define NIG_REG_NGE_COMP_VER 0x508b30UL
#define PBF_REG_NGE_COMP_VER 0xd80524UL
#define PRS_REG_NGE_COMP_VER 0x1f0878UL
#endif
@@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
union ramrod_data {
struct pf_start_ramrod_data pf_start;
struct pf_update_ramrod_data pf_update;
struct rx_queue_start_ramrod_data rx_queue_start;
struct rx_queue_update_ramrod_data rx_queue_update;
struct rx_queue_stop_ramrod_data rx_queue_stop;
@@ -338,12 +339,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
* @param p_tunn
* @param mode
*
* @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_tunn_start_params *p_tunn,
enum qed_mf_mode mode);
/**
@@ -362,4 +365,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
struct qed_tunn_update_params *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
#endif
@@ -87,7 +87,217 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
return 0;
}
static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
{
switch (type) {
case QED_TUNN_CLSS_MAC_VLAN:
return TUNNEL_CLSS_MAC_VLAN;
case QED_TUNN_CLSS_MAC_VNI:
return TUNNEL_CLSS_MAC_VNI;
case QED_TUNN_CLSS_INNER_MAC_VLAN:
return TUNNEL_CLSS_INNER_MAC_VLAN;
case QED_TUNN_CLSS_INNER_MAC_VNI:
return TUNNEL_CLSS_INNER_MAC_VNI;
default:
return TUNNEL_CLSS_MAC_VLAN;
}
}
static void
qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
struct qed_tunn_update_params *p_src,
struct pf_update_tunnel_config *p_tunn_cfg)
{
unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
unsigned long update_mask = p_src->tunn_mode_update_mask;
unsigned long tunn_mode = p_src->tunn_mode;
unsigned long new_tunn_mode = 0;
if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
} else {
if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
}
if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
} else {
if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
}
if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
} else {
if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
}
if (p_src->update_geneve_udp_port) {
p_tunn_cfg->set_geneve_udp_port_flg = 1;
p_tunn_cfg->geneve_udp_port =
cpu_to_le16(p_src->geneve_udp_port);
}
if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
} else {
if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
}
if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
} else {
if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
}
p_src->tunn_mode = new_tunn_mode;
}
static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
struct qed_tunn_update_params *p_src,
struct pf_update_tunnel_config *p_tunn_cfg)
{
unsigned long tunn_mode = p_src->tunn_mode;
enum tunnel_clss type;
qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
p_tunn_cfg->tunnel_clss_vxlan = type;
type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
p_tunn_cfg->tunnel_clss_l2gre = type;
type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
p_tunn_cfg->tunnel_clss_ipgre = type;
if (p_src->update_vxlan_udp_port) {
p_tunn_cfg->set_vxlan_udp_port_flg = 1;
p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
}
if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_l2gre = 1;
if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_ipgre = 1;
if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_vxlan = 1;
if (p_src->update_geneve_udp_port) {
p_tunn_cfg->set_geneve_udp_port_flg = 1;
p_tunn_cfg->geneve_udp_port =
cpu_to_le16(p_src->geneve_udp_port);
}
if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_l2geneve = 1;
if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_ipgeneve = 1;
type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
p_tunn_cfg->tunnel_clss_l2geneve = type;
type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
p_tunn_cfg->tunnel_clss_ipgeneve = type;
}
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
unsigned long tunn_mode)
{
u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
u8 l2geneve_enable = 0, ipgeneve_enable = 0;
if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
l2gre_enable = 1;
if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
ipgre_enable = 1;
if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
vxlan_enable = 1;
qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
l2geneve_enable = 1;
if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
ipgeneve_enable = 1;
qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
ipgeneve_enable);
}
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
struct qed_tunn_start_params *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
unsigned long tunn_mode;
enum tunnel_clss type;
if (!p_src)
return;
tunn_mode = p_src->tunn_mode;
type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
p_tunn_cfg->tunnel_clss_vxlan = type;
type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
p_tunn_cfg->tunnel_clss_l2gre = type;
type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
p_tunn_cfg->tunnel_clss_ipgre = type;
if (p_src->update_vxlan_udp_port) {
p_tunn_cfg->set_vxlan_udp_port_flg = 1;
p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
}
if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_l2gre = 1;
if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_ipgre = 1;
if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_vxlan = 1;
if (p_src->update_geneve_udp_port) {
p_tunn_cfg->set_geneve_udp_port_flg = 1;
p_tunn_cfg->geneve_udp_port =
cpu_to_le16(p_src->geneve_udp_port);
}
if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_l2geneve = 1;
if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
p_tunn_cfg->tx_enable_ipgeneve = 1;
type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
p_tunn_cfg->tunnel_clss_l2geneve = type;
type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
p_tunn_cfg->tunnel_clss_ipgeneve = type;
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_tunn_start_params *p_tunn,
enum qed_mf_mode mode)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
@@ -143,6 +353,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
p_hwfn->hw_info.personality = PERSONALITY_ETH;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -153,6 +365,49 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
struct qed_tunn_update_params *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
&init_data);
if (rc)
return rc;
qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
&p_ent->ramrod.pf_update.tunnel_config);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
return rc;
if (p_tunn->update_vxlan_udp_port)
qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
p_tunn->vxlan_udp_port);
if (p_tunn->update_geneve_udp_port)
qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
p_tunn->geneve_udp_port);
qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
return rc;
}
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
...
@@ -169,6 +169,8 @@ struct qede_dev {
bool accept_any_vlan;
struct delayed_work sp_task;
unsigned long sp_flags;
u16 vxlan_dst_port;
u16 geneve_dst_port;
};
enum QEDE_STATE {
@@ -288,8 +290,11 @@ struct qede_fastpath {
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
#define QEDE_SP_RX_MODE 1
#define QEDE_SP_VXLAN_PORT_CONFIG 2
#define QEDE_SP_GENEVE_PORT_CONFIG 3
union qede_reload_args {
u16 mtu;
...
@@ -24,7 +24,12 @@
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#ifdef CONFIG_QEDE_VXLAN
#include <net/vxlan.h>
#endif
#ifdef CONFIG_QEDE_GENEVE
#include <net/geneve.h>
#endif
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -310,6 +315,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
*ipv6_ext = 1;
if (skb->encapsulation)
rc |= XMIT_ENC;
if (skb_is_gso(skb))
rc |= XMIT_LSO;
@@ -371,6 +379,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
return 0;
}
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
if (is_encap_pkt)
return (skb_inner_transport_header(skb) +
inner_tcp_hdrlen(skb) - skb->data);
else
return (skb_transport_header(skb) +
tcp_hdrlen(skb) - skb->data);
}
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -381,8 +399,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
if (xmit_type & XMIT_LSO) {
int hlen;
- hlen = skb_transport_header(skb) +
-        tcp_hdrlen(skb) - skb->data;
hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
/* linear payload would require its own BD */
if (skb_headlen(skb) > hlen)
@@ -490,7 +507,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
if (xmit_type & XMIT_ENC) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
} else {
/* In cases when OS doesn't indicate for inner offloads
* when packet is tunnelled, we need to override the HW
* tunnel configuration so that packets are treated as
* regular non tunnelled packets and no inner offloads
* are done by the hardware.
*/
first_bd->data.bitfields |= cpu_to_le16(temp);
}
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
@@ -506,10 +534,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
third_bd->data.lso_mss =
cpu_to_le16(skb_shinfo(skb)->gso_size);
if (unlikely(xmit_type & XMIT_ENC)) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
hlen = qede_get_skb_hlen(skb, true);
} else {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- hlen = skb_transport_header(skb) +
-        tcp_hdrlen(skb) - skb->data;
hlen = qede_get_skb_hlen(skb, false);
}
/* @@@TBD - if will not be removed need to check */
third_bd->data.bitfields |=
@@ -843,6 +876,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
if (csum_flag & QEDE_CSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
skb->csum_level = 1;
}
static inline void qede_skb_receive(struct qede_dev *edev,
@@ -1132,13 +1168,47 @@ static void qede_tpa_end(struct qede_dev *edev,
tpa_info->skb = NULL;
}
- static u8 qede_check_csum(u16 flag)
static bool qede_tunn_exist(u16 flag)
{
return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}
static u8 qede_check_tunn_csum(u16 flag)
{
u16 csum_flag = 0;
u8 tcsum = 0;
if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
}
csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
if (csum_flag & flag)
return QEDE_CSUM_ERROR;
return QEDE_CSUM_UNNECESSARY | tcsum;
}
static u8 qede_check_notunn_csum(u16 flag)
{
u16 csum_flag = 0;
u8 csum = 0;
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
csum = QEDE_CSUM_UNNECESSARY;
@@ -1153,6 +1223,14 @@ static u8 qede_check_csum(u16 flag)
return csum;
}
static u8 qede_check_csum(u16 flag)
{
if (!qede_tunn_exist(flag))
return qede_check_notunn_csum(flag);
else
return qede_check_tunn_csum(flag);
}
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_dev *edev = fp->edev;
@@ -1821,6 +1899,76 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
#ifdef CONFIG_QEDE_VXLAN
static void qede_add_vxlan_port(struct net_device *dev,
sa_family_t sa_family, __be16 port)
{
struct qede_dev *edev = netdev_priv(dev);
u16 t_port = ntohs(port);
if (edev->vxlan_dst_port)
return;
edev->vxlan_dst_port = t_port;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
}
static void qede_del_vxlan_port(struct net_device *dev,
sa_family_t sa_family, __be16 port)
{
struct qede_dev *edev = netdev_priv(dev);
u16 t_port = ntohs(port);
if (t_port != edev->vxlan_dst_port)
return;
edev->vxlan_dst_port = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
}
#endif
#ifdef CONFIG_QEDE_GENEVE
static void qede_add_geneve_port(struct net_device *dev,
sa_family_t sa_family, __be16 port)
{
struct qede_dev *edev = netdev_priv(dev);
u16 t_port = ntohs(port);
if (edev->geneve_dst_port)
return;
edev->geneve_dst_port = t_port;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
}
static void qede_del_geneve_port(struct net_device *dev,
sa_family_t sa_family, __be16 port)
{
struct qede_dev *edev = netdev_priv(dev);
u16 t_port = ntohs(port);
if (t_port != edev->geneve_dst_port)
return;
edev->geneve_dst_port = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
}
#endif
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1832,6 +1980,14 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QEDE_VXLAN
.ndo_add_vxlan_port = qede_add_vxlan_port,
.ndo_del_vxlan_port = qede_del_vxlan_port,
#endif
#ifdef CONFIG_QEDE_GENEVE
.ndo_add_geneve_port = qede_add_geneve_port,
.ndo_del_geneve_port = qede_del_geneve_port,
#endif
};
/* -------------------------------------------------------------------------
@@ -1904,6 +2060,14 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
/* Encap features*/
hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_TSO_ECN;
ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2004,6 +2168,8 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
struct qed_dev *cdev = edev->cdev;
mutex_lock(&edev->qede_lock);
if (edev->state == QEDE_STATE_OPEN) {
@@ -2011,6 +2177,24 @@ static void qede_sp_task(struct work_struct *work)
qede_config_rx_mode(edev->ndev);
}
if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
struct qed_tunn_params tunn_params;
memset(&tunn_params, 0, sizeof(tunn_params));
tunn_params.update_vxlan_port = 1;
tunn_params.vxlan_port = edev->vxlan_dst_port;
qed_ops->tunn_config(cdev, &tunn_params);
}
if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
struct qed_tunn_params tunn_params;
memset(&tunn_params, 0, sizeof(tunn_params));
tunn_params.update_geneve_port = 1;
tunn_params.geneve_port = edev->geneve_dst_port;
qed_ops->tunn_config(cdev, &tunn_params);
}
mutex_unlock(&edev->qede_lock);
}
@@ -3149,12 +3333,24 @@ void qede_reload(struct qede_dev *edev,
static int qede_open(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
int rc;
netif_carrier_off(ndev);
edev->ops->common->set_power_state(edev->cdev, PCI_D0);
- return qede_load(edev, QEDE_LOAD_NORMAL);
rc = qede_load(edev, QEDE_LOAD_NORMAL);
if (rc)
return rc;
#ifdef CONFIG_QEDE_VXLAN
vxlan_get_rx_port(ndev);
#endif
#ifdef CONFIG_QEDE_GENEVE
geneve_get_rx_port(ndev);
#endif
return 0;
}
static int qede_close(struct net_device *ndev)
...
@@ -112,6 +112,13 @@ struct qed_queue_start_common_params {
u16 sb_idx;
};
struct qed_tunn_params {
u16 vxlan_port;
u8 update_vxlan_port;
u16 geneve_port;
u8 update_geneve_port;
};
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
};
@@ -166,6 +173,9 @@ struct qed_eth_ops {
void (*get_vport_stats)(struct qed_dev *cdev,
struct qed_eth_stats *stats);
int (*tunn_config)(struct qed_dev *cdev,
struct qed_tunn_params *params);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
...