Commit c1a9f80e authored by David S. Miller

Merge branch 'qed-vf-tunnel'

Manish Chopra says:

====================
qed/qede: VF tunnelling support

With this series VFs can run vxlan/geneve/gre tunnels.
Please consider applying this series to "net-next"
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 36784277 eaf3c0c6
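
The series introduces a per-device struct qed_tunnel_info and a new VF-to-PF mailbox TLV (CHANNEL_TLV_UPDATE_TUNN_PARAM) through which a VF asks the PF to enable tunnel modes and UDP ports, and the PF replies with the configuration it actually applied. The stand-alone program below is only a toy sketch of that negotiation rule, not driver code; the struct, function names and the main() harness are illustrative. Only the "update mask + mode bits; an update that asks to disable a mode is rejected" behaviour is taken from the diff (qed_pf_validate_tunn_mode).

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of one tunnel type in the VF<->PF exchange. */
struct tunn_mode_req {
    bool update;   /* VF sets this bit in tun_mode_update_mask */
    bool enable;   /* VF sets this bit in tunn_mode */
};

/* Mirrors the PF-side rule in qed_pf_validate_tunn_mode(): an update
 * request that tries to turn a mode off is dropped and flagged as error. */
static int pf_apply(struct tunn_mode_req *req, bool *pf_state)
{
    if (!req->update)
        return 0;             /* nothing requested, keep PF state */
    if (!req->enable) {
        req->update = false;  /* PF refuses "update to disabled" */
        return -1;
    }
    *pf_state = true;         /* PF enables the mode */
    return 0;
}

int main(void)
{
    bool pf_vxlan = false;
    struct tunn_mode_req vf_req = { .update = true, .enable = true };

    int rc = pf_apply(&vf_req, &pf_vxlan);
    printf("rc=%d pf_vxlan=%d\n", rc, pf_vxlan); /* rc=0 pf_vxlan=1 */
    return 0;
}
```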
@@ -149,9 +149,35 @@ enum qed_tunn_clss {
 	QED_TUNN_CLSS_MAC_VNI,
 	QED_TUNN_CLSS_INNER_MAC_VLAN,
 	QED_TUNN_CLSS_INNER_MAC_VNI,
+	QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
 	MAX_QED_TUNN_CLSS,
 };
 
+struct qed_tunn_update_type {
+	bool b_update_mode;
+	bool b_mode_enabled;
+	enum qed_tunn_clss tun_cls;
+};
+
+struct qed_tunn_update_udp_port {
+	bool b_update_port;
+	u16 port;
+};
+
+struct qed_tunnel_info {
+	struct qed_tunn_update_type vxlan;
+	struct qed_tunn_update_type l2_geneve;
+	struct qed_tunn_update_type ip_geneve;
+	struct qed_tunn_update_type l2_gre;
+	struct qed_tunn_update_type ip_gre;
+
+	struct qed_tunn_update_udp_port vxlan_port;
+	struct qed_tunn_update_udp_port geneve_port;
+
+	bool b_update_rx_cls;
+	bool b_update_tx_cls;
+};
+
 struct qed_tunn_start_params {
 	unsigned long tunn_mode;
 	u16 vxlan_udp_port;
@@ -648,9 +674,7 @@ struct qed_dev {
 	/* SRIOV */
 	struct qed_hw_sriov_info *p_iov_info;
 #define IS_QED_SRIOV(cdev)	(!!(cdev)->p_iov_info)
-	unsigned long		tunn_mode;
+	struct qed_tunnel_info	tunnel;
 	bool			b_is_vf;
 	u32			drv_type;
 	struct qed_eth_stats	*reset_stats;
@@ -694,6 +718,7 @@ struct qed_dev {
 	u32 rdma_max_sge;
 	u32 rdma_max_inline;
 	u32 rdma_max_srq_sge;
+	u16 tunn_feature_mask;
 };
 
 #define NUM_OF_VFS(dev)	(QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
......
@@ -1453,7 +1453,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt,
-			  struct qed_tunn_start_params *p_tunn,
+			  struct qed_tunnel_info *p_tunn,
 			  int hw_mode,
 			  bool b_hw_start,
 			  enum qed_int_mode int_mode,
@@ -1594,6 +1594,19 @@ qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
 	p_load_req->override_force_load = p_drv_load->override_force_load;
 }
 
+static int qed_vf_start(struct qed_hwfn *p_hwfn,
+			struct qed_hw_init_params *p_params)
+{
+	if (p_params->p_tunn) {
+		qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+		qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+	}
+
+	p_hwfn->b_int_enabled = 1;
+
+	return 0;
+}
+
 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 {
 	struct qed_load_req_params load_req_params;
@@ -1623,7 +1636,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 		}
 
 		if (IS_VF(cdev)) {
-			p_hwfn->b_int_enabled = 1;
+			qed_vf_start(p_hwfn, p_params);
 			continue;
 		}
......
@@ -113,7 +113,7 @@ struct qed_drv_load_params {
 struct qed_hw_init_params {
 	/* Tunneling parameters */
-	struct qed_tunn_start_params *p_tunn;
+	struct qed_tunnel_info *p_tunn;
 	bool b_hw_start;
......
@@ -2285,31 +2285,46 @@ static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
 static int qed_tunn_configure(struct qed_dev *cdev,
 			      struct qed_tunn_params *tunn_params)
 {
-	struct qed_tunn_update_params tunn_info;
+	struct qed_tunnel_info tunn_info;
 	int i, rc;
 
-	if (IS_VF(cdev))
-		return 0;
-
 	memset(&tunn_info, 0, sizeof(tunn_info));
-	if (tunn_params->update_vxlan_port == 1) {
-		tunn_info.update_vxlan_udp_port = 1;
-		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+	if (tunn_params->update_vxlan_port) {
+		tunn_info.vxlan_port.b_update_port = true;
+		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
 	}
 
-	if (tunn_params->update_geneve_port == 1) {
-		tunn_info.update_geneve_udp_port = 1;
-		tunn_info.geneve_udp_port = tunn_params->geneve_port;
+	if (tunn_params->update_geneve_port) {
+		tunn_info.geneve_port.b_update_port = true;
+		tunn_info.geneve_port.port = tunn_params->geneve_port;
 	}
 
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_tunnel_info *tun;
+
+		tun = &hwfn->cdev->tunnel;
 
 		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
 					       QED_SPQ_MODE_EBLOCK, NULL);
 		if (rc)
 			return rc;
+
+		if (IS_PF_SRIOV(hwfn)) {
+			u16 vxlan_port, geneve_port;
+			int j;
+
+			vxlan_port = tun->vxlan_port.port;
+			geneve_port = tun->geneve_port.port;
+
+			qed_for_each_vf(hwfn, j) {
+				qed_iov_bulletin_set_udp_ports(hwfn, j,
+							       vxlan_port,
+							       geneve_port);
+			}
+
+			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+		}
 	}
 
 	return 0;
......
@@ -230,10 +230,25 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
 int qed_fill_dev_info(struct qed_dev *cdev,
 		      struct qed_dev_info *dev_info)
 {
+	struct qed_tunnel_info *tun = &cdev->tunnel;
 	struct qed_ptt *ptt;
 
 	memset(dev_info, 0, sizeof(struct qed_dev_info));
 
+	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+	    tun->vxlan.b_mode_enabled)
+		dev_info->vxlan_enable = true;
+
+	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+		dev_info->gre_enable = true;
+
+	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+		dev_info->geneve_enable = true;
+
 	dev_info->num_hwfns = cdev->num_hwfns;
 	dev_info->pci_mem_start = cdev->pci_params.mem_start;
 	dev_info->pci_mem_end = cdev->pci_params.mem_end;
@@ -909,8 +924,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 {
 	struct qed_drv_load_params drv_load_params;
 	struct qed_hw_init_params hw_init_params;
-	struct qed_tunn_start_params tunn_info;
 	struct qed_mcp_drv_version drv_version;
+	struct qed_tunnel_info tunn_info;
 	const u8 *data = NULL;
 	struct qed_hwfn *hwfn;
 	struct qed_ptt *p_ptt;
@@ -974,19 +989,19 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 		qed_dbg_pf_init(cdev);
 	}
 
-	memset(&tunn_info, 0, sizeof(tunn_info));
-	tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
-				1 << QED_MODE_L2GRE_TUNN |
-				1 << QED_MODE_IPGRE_TUNN |
-				1 << QED_MODE_L2GENEVE_TUNN |
-				1 << QED_MODE_IPGENEVE_TUNN;
-
-	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
-	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
-	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
-
 	/* Start the slowpath */
 	memset(&hw_init_params, 0, sizeof(hw_init_params));
+	memset(&tunn_info, 0, sizeof(tunn_info));
+	tunn_info.vxlan.b_mode_enabled = true;
+	tunn_info.l2_gre.b_mode_enabled = true;
+	tunn_info.ip_gre.b_mode_enabled = true;
+	tunn_info.l2_geneve.b_mode_enabled = true;
+	tunn_info.ip_geneve.b_mode_enabled = true;
+	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
 	hw_init_params.p_tunn = &tunn_info;
 	hw_init_params.b_hw_start = true;
 	hw_init_params.int_mode = cdev->int_params.out.int_mode;
@@ -1007,6 +1022,14 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 	DP_INFO(cdev,
 		"HW initialization and function start completed successfully\n");
 
+	if (IS_PF(cdev)) {
+		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
+					   BIT(QED_MODE_L2GENEVE_TUNN) |
+					   BIT(QED_MODE_IPGENEVE_TUNN) |
+					   BIT(QED_MODE_L2GRE_TUNN) |
+					   BIT(QED_MODE_IPGRE_TUNN));
+	}
+
 	/* Allocate LL2 interface if needed */
 	if (QED_LEADING_HWFN(cdev)->using_ll2) {
 		rc = qed_ll2_alloc_if(cdev);
......
@@ -409,7 +409,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
  */
 
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
-		    struct qed_tunn_start_params *p_tunn,
+		    struct qed_tunnel_info *p_tunn,
 		    enum qed_mf_mode mode, bool allow_npar_tx_switch);
 
 /**
@@ -442,7 +442,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
 
 int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
-			      struct qed_tunn_update_params *p_tunn,
+			      struct qed_tunnel_info *p_tunn,
 			      enum spq_mode comp_mode,
 			      struct qed_spq_comp_cb *p_comp_data);
 
 /**
......
@@ -111,7 +111,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
-static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
 {
 	switch (type) {
 	case QED_TUNN_CLSS_MAC_VLAN:
@@ -122,206 +122,201 @@ static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
 		return TUNNEL_CLSS_INNER_MAC_VLAN;
 	case QED_TUNN_CLSS_INNER_MAC_VNI:
 		return TUNNEL_CLSS_INNER_MAC_VNI;
+	case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
 	default:
 		return TUNNEL_CLSS_MAC_VLAN;
 	}
 }
 
-static void
-qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
-			      struct qed_tunn_update_params *p_src,
-			      struct pf_update_tunnel_config *p_tunn_cfg)
-{
-	unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
-	unsigned long update_mask = p_src->tunn_mode_update_mask;
-	unsigned long tunn_mode = p_src->tunn_mode;
-	unsigned long new_tunn_mode = 0;
-
-	if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
-		if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
-			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
-	} else {
-		if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
-			__set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
-	}
-
-	if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
-		if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
-			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
-	} else {
-		if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
-			__set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
-	}
-
-	if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
-		if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
-			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
-	} else {
-		if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
-			__set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
-	}
-
-	if (p_src->update_geneve_udp_port) {
-		p_tunn_cfg->set_geneve_udp_port_flg = 1;
-		p_tunn_cfg->geneve_udp_port =
-		    cpu_to_le16(p_src->geneve_udp_port);
-	}
-
-	if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
-		if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
-			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-	} else {
-		if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
-			__set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-	}
-
-	if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
-		if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
-			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-	} else {
-		if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
-			__set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-	}
-
-	p_src->tunn_mode = new_tunn_mode;
-}
-
-static void
-qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
-			      struct qed_tunn_update_params *p_src,
-			      struct pf_update_tunnel_config *p_tunn_cfg)
-{
-	unsigned long tunn_mode = p_src->tunn_mode;
-	enum tunnel_clss type;
-
-	qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
-	p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
-	p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-	p_tunn_cfg->tunnel_clss_vxlan = type;
-
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-	p_tunn_cfg->tunnel_clss_l2gre = type;
-
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-	p_tunn_cfg->tunnel_clss_ipgre = type;
-
-	if (p_src->update_vxlan_udp_port) {
-		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
-	}
-
-	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_l2gre = 1;
-
-	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_ipgre = 1;
-
-	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_vxlan = 1;
-
-	if (p_src->update_geneve_udp_port) {
-		p_tunn_cfg->set_geneve_udp_port_flg = 1;
-		p_tunn_cfg->geneve_udp_port =
-		    cpu_to_le16(p_src->geneve_udp_port);
-	}
-
-	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_l2geneve = 1;
-
-	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_ipgeneve = 1;
-
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-	p_tunn_cfg->tunnel_clss_l2geneve = type;
-
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-	p_tunn_cfg->tunnel_clss_ipgeneve = type;
-}
+static void
+qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
+			    struct qed_tunnel_info *p_src, bool b_pf_start)
+{
+	if (p_src->vxlan.b_update_mode || b_pf_start)
+		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
+
+	if (p_src->l2_gre.b_update_mode || b_pf_start)
+		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
+
+	if (p_src->ip_gre.b_update_mode || b_pf_start)
+		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
+
+	if (p_src->l2_geneve.b_update_mode || b_pf_start)
+		p_tun->l2_geneve.b_mode_enabled =
+		    p_src->l2_geneve.b_mode_enabled;
+
+	if (p_src->ip_geneve.b_update_mode || b_pf_start)
+		p_tun->ip_geneve.b_mode_enabled =
+		    p_src->ip_geneve.b_mode_enabled;
+}
+
+static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
+				  struct qed_tunnel_info *p_src)
+{
+	enum tunnel_clss type;
+
+	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+	type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+	p_tun->vxlan.tun_cls = type;
+	type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+	p_tun->l2_gre.tun_cls = type;
+	type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+	p_tun->ip_gre.tun_cls = type;
+	type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+	p_tun->l2_geneve.tun_cls = type;
+	type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+	p_tun->ip_geneve.tun_cls = type;
+}
+
+static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
+			       struct qed_tunnel_info *p_src)
+{
+	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
+
+	if (p_src->geneve_port.b_update_port)
+		p_tun->geneve_port.port = p_src->geneve_port.port;
+
+	if (p_src->vxlan_port.b_update_port)
+		p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
+
+static void
+__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+			      struct qed_tunn_update_type *tun_type)
+{
+	*p_tunn_cls = tun_type->tun_cls;
+
+	if (tun_type->b_mode_enabled)
+		*p_enable_tx_clas = 1;
+}
+
+static void
+qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+			    struct qed_tunn_update_type *tun_type,
+			    u8 *p_update_port, __le16 *p_port,
+			    struct qed_tunn_update_udp_port *p_udp_port)
+{
+	__qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type);
+	if (p_udp_port->b_update_port) {
+		*p_update_port = 1;
+		*p_port = cpu_to_le16(p_udp_port->port);
+	}
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+			      struct qed_tunnel_info *p_src,
+			      struct pf_update_tunnel_config *p_tunn_cfg)
+{
+	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+
+	qed_set_pf_update_tunn_mode(p_tun, p_src, false);
+	qed_set_tunn_cls_info(p_tun, p_src);
+	qed_set_tunn_ports(p_tun, p_src);
+
+	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+				    &p_tunn_cfg->tx_enable_vxlan,
+				    &p_tun->vxlan,
+				    &p_tunn_cfg->set_vxlan_udp_port_flg,
+				    &p_tunn_cfg->vxlan_udp_port,
+				    &p_tun->vxlan_port);
+
+	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+				    &p_tunn_cfg->tx_enable_l2geneve,
+				    &p_tun->l2_geneve,
+				    &p_tunn_cfg->set_geneve_udp_port_flg,
+				    &p_tunn_cfg->geneve_udp_port,
+				    &p_tun->geneve_port);
+
+	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+				      &p_tunn_cfg->tx_enable_ipgeneve,
+				      &p_tun->ip_geneve);
+
+	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+				      &p_tunn_cfg->tx_enable_l2gre,
+				      &p_tun->l2_gre);
+
+	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+				      &p_tunn_cfg->tx_enable_ipgre,
+				      &p_tun->ip_gre);
+
+	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+	p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
+}
 
 static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
 				 struct qed_ptt *p_ptt,
-				 unsigned long tunn_mode)
-{
-	u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
-	u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
-	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
-		l2gre_enable = 1;
-
-	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
-		ipgre_enable = 1;
-
-	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
-		vxlan_enable = 1;
-
-	qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
-	qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
-
-	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
-		l2geneve_enable = 1;
-
-	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
-		ipgeneve_enable = 1;
-
-	qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
-			      ipgeneve_enable);
+				 struct qed_tunnel_info *p_tun)
+{
+	qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+			   p_tun->ip_gre.b_mode_enabled);
+	qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
+
+	qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+			      p_tun->ip_geneve.b_mode_enabled);
 }
 
+static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+				      struct qed_tunnel_info *p_tunn)
+{
+	if (p_tunn->vxlan_port.b_update_port)
+		qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+					p_tunn->vxlan_port.port);
+
+	if (p_tunn->geneve_port.b_update_port)
+		qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+					 p_tunn->geneve_port.port);
+
+	qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+}
+
 static void
 qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
-			     struct qed_tunn_start_params *p_src,
+			     struct qed_tunnel_info *p_src,
 			     struct pf_start_tunnel_config *p_tunn_cfg)
 {
-	unsigned long tunn_mode;
-	enum tunnel_clss type;
+	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
 
 	if (!p_src)
 		return;
 
-	tunn_mode = p_src->tunn_mode;
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-	p_tunn_cfg->tunnel_clss_vxlan = type;
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-	p_tunn_cfg->tunnel_clss_l2gre = type;
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-	p_tunn_cfg->tunnel_clss_ipgre = type;
-
-	if (p_src->update_vxlan_udp_port) {
-		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-		p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
-	}
-
-	if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_l2gre = 1;
-
-	if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_ipgre = 1;
-
-	if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_vxlan = 1;
-
-	if (p_src->update_geneve_udp_port) {
-		p_tunn_cfg->set_geneve_udp_port_flg = 1;
-		p_tunn_cfg->geneve_udp_port =
-		    cpu_to_le16(p_src->geneve_udp_port);
-	}
-
-	if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_l2geneve = 1;
-
-	if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
-		p_tunn_cfg->tx_enable_ipgeneve = 1;
-
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-	p_tunn_cfg->tunnel_clss_l2geneve = type;
-
-	type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-	p_tunn_cfg->tunnel_clss_ipgeneve = type;
+	qed_set_pf_update_tunn_mode(p_tun, p_src, true);
+	qed_set_tunn_cls_info(p_tun, p_src);
+	qed_set_tunn_ports(p_tun, p_src);
+
+	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+				    &p_tunn_cfg->tx_enable_vxlan,
+				    &p_tun->vxlan,
+				    &p_tunn_cfg->set_vxlan_udp_port_flg,
+				    &p_tunn_cfg->vxlan_udp_port,
+				    &p_tun->vxlan_port);
+
+	qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+				    &p_tunn_cfg->tx_enable_l2geneve,
+				    &p_tun->l2_geneve,
+				    &p_tunn_cfg->set_geneve_udp_port_flg,
+				    &p_tunn_cfg->geneve_udp_port,
+				    &p_tun->geneve_port);
+
+	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+				      &p_tunn_cfg->tx_enable_ipgeneve,
+				      &p_tun->ip_geneve);
+
+	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+				      &p_tunn_cfg->tx_enable_l2gre,
+				      &p_tun->l2_gre);
+
+	__qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+				      &p_tunn_cfg->tx_enable_ipgre,
+				      &p_tun->ip_gre);
 }
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
-		    struct qed_tunn_start_params *p_tunn,
+		    struct qed_tunnel_info *p_tunn,
 		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
 {
 	struct pf_start_ramrod_data *p_ramrod = NULL;
@@ -416,11 +411,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
-	if (p_tunn) {
-		qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
-				     p_tunn->tunn_mode);
-		p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
-	}
+	if (p_tunn)
+		qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
 
 	return rc;
 }
@@ -451,7 +443,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
 /* Set pf update ramrod command params */
 int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
-			      struct qed_tunn_update_params *p_tunn,
+			      struct qed_tunnel_info *p_tunn,
 			      enum spq_mode comp_mode,
 			      struct qed_spq_comp_cb *p_comp_data)
 {
@@ -459,6 +451,12 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
 	struct qed_sp_init_data init_data;
 	int rc = -EINVAL;
 
+	if (IS_VF(p_hwfn->cdev))
+		return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+	if (!p_tunn)
+		return -EINVAL;
+
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
 	init_data.cid = qed_spq_get_cid(p_hwfn);
@@ -479,15 +477,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
 
-	if (p_tunn->update_vxlan_udp_port)
-		qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-					p_tunn->vxlan_udp_port);
-
-	if (p_tunn->update_geneve_udp_port)
-		qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-					 p_tunn->geneve_udp_port);
-
-	qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
-	p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+	qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
 
 	return rc;
 }
......
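
The helpers above keep a single cached copy of the tunnel configuration in cdev->tunnel: on PF start every mode is (re)written, while on an update ramrod only the tunnel types whose b_update_mode flag is set overwrite the cache. The stand-alone sketch below models just that merge rule; the type and function names are illustrative, not the driver's.

```c
#include <stdbool.h>
#include <stdio.h>

struct tunn_type {
    bool b_update_mode;
    bool b_mode_enabled;
};

/* Mirrors qed_set_pf_update_tunn_mode(): the cached state only changes
 * when an update is requested for that type, or unconditionally when
 * this is the PF-start path (b_pf_start). */
static void merge_mode(struct tunn_type *cached, const struct tunn_type *req,
                       bool b_pf_start)
{
    if (req->b_update_mode || b_pf_start)
        cached->b_mode_enabled = req->b_mode_enabled;
}

int main(void)
{
    struct tunn_type vxlan_cached = { .b_mode_enabled = true };
    struct tunn_type req = { .b_update_mode = false, .b_mode_enabled = false };

    merge_mode(&vxlan_cached, &req, false); /* no update: cache kept */
    printf("vxlan enabled: %d\n", vxlan_cached.b_mode_enabled); /* 1 */

    merge_mode(&vxlan_cached, &req, true);  /* PF start: overwritten */
    printf("vxlan enabled: %d\n", vxlan_cached.b_mode_enabled); /* 0 */
    return 0;
}
```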
@@ -2019,6 +2019,220 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
 	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
 }
static void
qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
struct qed_tunnel_info *p_tun,
u16 tunn_feature_mask)
{
p_resp->tunn_feature_mask = tunn_feature_mask;
p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
p_resp->geneve_udp_port = p_tun->geneve_port.port;
p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
}
static void
__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_tun,
enum qed_tunn_mode mask, u8 tun_cls)
{
if (p_req->tun_mode_update_mask & BIT(mask)) {
p_tun->b_update_mode = true;
if (p_req->tunn_mode & BIT(mask))
p_tun->b_mode_enabled = true;
}
p_tun->tun_cls = tun_cls;
}
static void
qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_tun,
struct qed_tunn_update_udp_port *p_port,
enum qed_tunn_mode mask,
u8 tun_cls, u8 update_port, u16 port)
{
if (update_port) {
p_port->b_update_port = true;
p_port->port = port;
}
__qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
}
static bool
qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
{
bool b_update_requested = false;
if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
p_req->update_geneve_port || p_req->update_vxlan_port)
b_update_requested = true;
return b_update_requested;
}
static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
{
if (tun->b_update_mode && !tun->b_mode_enabled) {
tun->b_update_mode = false;
*rc = -EINVAL;
}
}
static int
qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
u16 *tun_features, bool *update,
struct qed_tunnel_info *tun_src)
{
struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
u16 bultn_vxlan_port, bultn_geneve_port;
void *cookie = p_hwfn->cdev->ops_cookie;
int i, rc = 0;
*tun_features = p_hwfn->cdev->tunn_feature_mask;
bultn_vxlan_port = tun->vxlan_port.port;
bultn_geneve_port = tun->geneve_port.port;
qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
(tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
tun_src->b_update_rx_cls = false;
tun_src->b_update_tx_cls = false;
rc = -EINVAL;
}
if (tun_src->vxlan_port.b_update_port) {
if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
tun_src->vxlan_port.b_update_port = false;
} else {
*update = true;
bultn_vxlan_port = tun_src->vxlan_port.port;
}
}
if (tun_src->geneve_port.b_update_port) {
if (tun_src->geneve_port.port == tun->geneve_port.port) {
tun_src->geneve_port.b_update_port = false;
} else {
*update = true;
bultn_geneve_port = tun_src->geneve_port.port;
}
}
qed_for_each_vf(p_hwfn, i) {
qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
bultn_geneve_port);
}
qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
return rc;
}
static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf)
{
struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct pfvf_update_tunn_param_tlv *p_resp;
struct vfpf_update_tunn_param_tlv *p_req;
u8 status = PFVF_STATUS_SUCCESS;
bool b_update_required = false;
struct qed_tunnel_info tunn;
u16 tunn_feature_mask = 0;
int i, rc = 0;
mbx->offset = (u8 *)mbx->reply_virt;
memset(&tunn, 0, sizeof(tunn));
p_req = &mbx->req_virt->tunn_param_update;
if (!qed_iov_pf_validate_tunn_param(p_req)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"No tunnel update requested by VF\n");
status = PFVF_STATUS_FAILURE;
goto send_resp;
}
tunn.b_update_rx_cls = p_req->update_tun_cls;
tunn.b_update_tx_cls = p_req->update_tun_cls;
qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
p_req->update_vxlan_port,
p_req->vxlan_port);
qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
QED_MODE_L2GENEVE_TUNN,
p_req->l2geneve_clss,
p_req->update_geneve_port,
p_req->geneve_port);
__qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
QED_MODE_IPGENEVE_TUNN,
p_req->ipgeneve_clss);
__qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
__qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
/* If PF modifies VF's req then it should
* still return an error in case of partial configuration
* or modified configuration as opposed to requested one.
*/
rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
&b_update_required, &tunn);
if (rc)
status = PFVF_STATUS_FAILURE;
/* If QED client is willing to update anything ? */
if (b_update_required) {
u16 geneve_port;
rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc)
status = PFVF_STATUS_FAILURE;
geneve_port = p_tun->geneve_port.port;
qed_for_each_vf(p_hwfn, i) {
qed_iov_bulletin_set_udp_ports(p_hwfn, i,
p_tun->vxlan_port.port,
geneve_port);
}
}
send_resp:
p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
 					  struct qed_ptt *p_ptt,
 					  struct qed_vf_info *p_vf, u8 status)
@@ -3275,6 +3489,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
 		case CHANNEL_TLV_RELEASE:
 			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
 			break;
+		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+			qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+			break;
 		}
 	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -3511,6 +3728,29 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
 	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
 }
void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
int vfid, u16 vxlan_port, u16 geneve_port)
{
struct qed_vf_info *vf_info;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Can not set udp ports, invalid vfid [%d]\n", vfid);
return;
}
if (vf_info->b_malicious) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Can not set udp ports to malicious VF [%d]\n",
vfid);
return;
}
vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}
 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
 {
 	struct qed_vf_info *p_vf_info;
......
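
The PF pushes the configured VXLAN/GENEVE UDP ports to every VF through the bulletin board (qed_iov_bulletin_set_udp_ports above), and qede drops its cached port when the bulletin no longer matches (qede_udp_ports_update further down). The snippet below is only a stand-alone model of that synchronization rule; the struct and function names are illustrative, not the driver's.

```c
#include <stdint.h>
#include <stdio.h>

/* Toy "bulletin board": the PF writes, each VF only reads. */
struct bulletin {
    uint16_t vxlan_udp_port;
    uint16_t geneve_udp_port;
};

struct vf_cache {
    uint16_t vxlan_dst_port;
    uint16_t geneve_dst_port;
};

/* PF side: publish the ports it actually programmed. */
static void pf_publish_ports(struct bulletin *b, uint16_t vxlan, uint16_t geneve)
{
    b->vxlan_udp_port = vxlan;
    b->geneve_udp_port = geneve;
}

/* VF side: mirror qede_udp_ports_update() - forget a cached port
 * that the PF no longer advertises. */
static void vf_handle_bulletin(struct vf_cache *vf, const struct bulletin *b)
{
    if (vf->vxlan_dst_port != b->vxlan_udp_port)
        vf->vxlan_dst_port = 0;
    if (vf->geneve_dst_port != b->geneve_udp_port)
        vf->geneve_dst_port = 0;
}

int main(void)
{
    struct bulletin b = { 0, 0 };
    struct vf_cache vf = { .vxlan_dst_port = 4789, .geneve_dst_port = 6081 };

    pf_publish_ports(&b, 4789, 0);  /* PF kept vxlan, dropped geneve */
    vf_handle_bulletin(&vf, &b);
    printf("vxlan=%u geneve=%u\n", vf.vxlan_dst_port, vf.geneve_dst_port);
    return 0;  /* prints: vxlan=4789 geneve=0 */
}
```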
@@ -270,6 +270,9 @@ enum qed_iov_wq_flag {
  */
 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
 
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+				    int vfid, u16 vxlan_port, u16 geneve_port);
+
 /**
  * @brief Read sriov related information and allocated resources
  *  reads from configuraiton space, shmem, etc.
@@ -378,6 +381,12 @@ static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
 	return MAX_NUM_VFS;
 }
 
+static inline void
+qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
+			       u16 vxlan_port, u16 geneve_port)
+{
+}
+
 static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
 {
 	return 0;
......
@@ -418,6 +418,155 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 #define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \
 				   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
enum qed_tunn_clss mask, u8 *p_cls)
{
if (p_src->b_update_mode) {
p_req->tun_mode_update_mask |= BIT(mask);
if (p_src->b_mode_enabled)
p_req->tunn_mode |= BIT(mask);
}
*p_cls = p_src->tun_cls;
}
static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
enum qed_tunn_clss mask,
u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
u8 *p_update_port, u16 *p_udp_port)
{
if (p_port->b_update_port) {
*p_update_port = 1;
*p_udp_port = p_port->port;
}
__qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}
void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
if (p_tun->vxlan.b_mode_enabled)
p_tun->vxlan.b_update_mode = true;
if (p_tun->l2_geneve.b_mode_enabled)
p_tun->l2_geneve.b_update_mode = true;
if (p_tun->ip_geneve.b_mode_enabled)
p_tun->ip_geneve.b_update_mode = true;
if (p_tun->l2_gre.b_mode_enabled)
p_tun->l2_gre.b_update_mode = true;
if (p_tun->ip_gre.b_mode_enabled)
p_tun->ip_gre.b_update_mode = true;
p_tun->b_update_rx_cls = true;
p_tun->b_update_tx_cls = true;
}
static void
__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
u16 feature_mask, u8 tunn_mode,
u8 tunn_cls, enum qed_tunn_mode val)
{
if (feature_mask & BIT(val)) {
p_tun->b_mode_enabled = tunn_mode;
p_tun->tun_cls = tunn_cls;
} else {
p_tun->b_mode_enabled = false;
}
}
static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
struct qed_tunnel_info *p_tun,
struct pfvf_update_tunn_param_tlv *p_resp)
{
/* Update mode and classes provided by PF */
u16 feat_mask = p_resp->tunn_feature_mask;
__qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
p_resp->vxlan_mode, p_resp->vxlan_clss,
QED_MODE_VXLAN_TUNN);
__qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
p_resp->l2geneve_mode,
p_resp->l2geneve_clss,
QED_MODE_L2GENEVE_TUNN);
__qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
p_resp->ipgeneve_mode,
p_resp->ipgeneve_clss,
QED_MODE_IPGENEVE_TUNN);
__qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
p_resp->l2gre_mode, p_resp->l2gre_clss,
QED_MODE_L2GRE_TUNN);
__qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
p_resp->ipgre_mode, p_resp->ipgre_clss,
QED_MODE_IPGRE_TUNN);
p_tun->geneve_port.port = p_resp->geneve_udp_port;
p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
p_tun->ip_geneve.b_mode_enabled,
p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
}
int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
struct qed_tunnel_info *p_src)
{
struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_update_tunn_param_tlv *p_resp;
struct vfpf_update_tunn_param_tlv *p_req;
int rc;
p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
sizeof(*p_req));
if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
p_req->update_tun_cls = 1;
qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
&p_req->vxlan_clss, &p_src->vxlan_port,
&p_req->update_vxlan_port,
&p_req->vxlan_port);
qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
QED_MODE_L2GENEVE_TUNN,
&p_req->l2geneve_clss, &p_src->geneve_port,
&p_req->update_geneve_port,
&p_req->geneve_port);
__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
QED_MODE_IPGENEVE_TUNN,
&p_req->ipgeneve_clss);
__qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
__qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
if (rc)
goto exit;
if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Failed to update tunnel parameters\n");
rc = -EINVAL;
}
qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
qed_vf_pf_req_end(p_hwfn, rc);
return rc;
}
 int
 qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
 		    struct qed_queue_cid *p_cid,
@@ -1251,6 +1400,18 @@ static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
 	return true;
 }
 
+static void
+qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
+			      u16 *p_vxlan_port, u16 *p_geneve_port)
+{
+	struct qed_bulletin_content *p_bulletin;
+
+	p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+	*p_vxlan_port = p_bulletin->vxlan_udp_port;
+	*p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
 void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
 			   u16 *fw_major, u16 *fw_minor,
 			   u16 *fw_rev, u16 *fw_eng)
@@ -1270,12 +1431,16 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
 	struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
 	u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
 	void *cookie = hwfn->cdev->ops_cookie;
+	u16 vxlan_port, geneve_port;
 
+	qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
 	is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
 						      &is_mac_forced);
 	if (is_mac_exist && cookie)
 		ops->force_mac(cookie, mac, !!is_mac_forced);
 
+	ops->ports_update(cookie, vxlan_port, geneve_port);
+
 	/* Always update link configuration according to bulletin */
 	qed_link_update(hwfn);
 }
......
@@ -429,6 +429,43 @@ struct vfpf_ucast_filter_tlv {
 	u16 padding[3];
 };
 
/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
struct vfpf_first_tlv first_tlv;
u8 tun_mode_update_mask;
u8 tunn_mode;
u8 update_tun_cls;
u8 vxlan_clss;
u8 l2gre_clss;
u8 ipgre_clss;
u8 l2geneve_clss;
u8 ipgeneve_clss;
u8 update_geneve_port;
u8 update_vxlan_port;
u16 geneve_port;
u16 vxlan_port;
u8 padding[2];
};
struct pfvf_update_tunn_param_tlv {
struct pfvf_tlv hdr;
u16 tunn_feature_mask;
u8 vxlan_mode;
u8 l2geneve_mode;
u8 ipgeneve_mode;
u8 l2gre_mode;
u8 ipgre_mode;
u8 vxlan_clss;
u8 l2gre_clss;
u8 ipgre_clss;
u8 l2geneve_clss;
u8 ipgeneve_clss;
u16 vxlan_udp_port;
u16 geneve_udp_port;
};
 struct tlv_buffer_size {
 	u8 tlv_buffer[TLV_BUFFER_SIZE];
 };
@@ -444,6 +481,7 @@ union vfpf_tlvs {
 	struct vfpf_vport_start_tlv start_vport;
 	struct vfpf_vport_update_tlv vport_update;
 	struct vfpf_ucast_filter_tlv ucast_filter;
+	struct vfpf_update_tunn_param_tlv tunn_param_update;
 	struct channel_list_end_tlv list_end;
 	struct tlv_buffer_size tlv_buf_size;
 };
@@ -453,6 +491,7 @@ union pfvf_tlvs {
 	struct pfvf_acquire_resp_tlv acquire_resp;
 	struct tlv_buffer_size tlv_buf_size;
 	struct pfvf_start_queue_resp_tlv queue_start;
+	struct pfvf_update_tunn_param_tlv tunn_param_resp;
 };
 
 enum qed_bulletin_bit {
@@ -513,7 +552,9 @@ struct qed_bulletin_content {
 	u8 partner_rx_flow_ctrl_en;
 	u8 partner_adv_pause;
 	u8 sfp_tx_fault;
-	u8 padding4[6];
+	u16 vxlan_udp_port;
+	u16 geneve_udp_port;
+	u8 padding4[2];
 
 	u32 speed;
 	u32 partner_adv_speed;
@@ -555,6 +596,7 @@ enum {
 	CHANNEL_TLV_VPORT_UPDATE_RSS,
 	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
 	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+	CHANNEL_TLV_UPDATE_TUNN_PARAM,
 	CHANNEL_TLV_MAX,
 
 	/* Required for iterating over vport-update tlvs.
@@ -872,6 +914,9 @@ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
 			    struct qed_bulletin_content *p_bulletin);
 
 void qed_iov_vf_task(struct work_struct *work);
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+				  struct qed_tunnel_info *p_tunn);
 #else
 static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
 					  struct qed_mcp_link_params *params)
@@ -1033,6 +1078,17 @@ __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
 static inline void qed_iov_vf_task(struct work_struct *work)
 {
 }
+
+static inline void
+qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+}
+
+static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+						struct qed_tunnel_info *p_tunn)
+{
+	return -EINVAL;
+}
 #endif
 
 #endif
@@ -442,8 +442,6 @@ struct qede_fastpath {
 #define QEDE_TUNN_CSUM_UNNECESSARY	BIT(2)
 
 #define QEDE_SP_RX_MODE			1
-#define QEDE_SP_VXLAN_PORT_CONFIG	2
-#define QEDE_SP_GENEVE_PORT_CONFIG	3
 
 #ifdef CONFIG_RFS_ACCEL
 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
@@ -482,6 +480,7 @@ irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
 
 /* Filtering function definitions */
 void qede_force_mac(void *dev, u8 *mac, bool forced);
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
 int qede_set_mac_addr(struct net_device *ndev, void *p);
 
 int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
......
@@ -480,6 +480,17 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 }
 #endif
 
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
+{
+	struct qede_dev *edev = dev;
+
+	if (edev->vxlan_dst_port != vxlan_port)
+		edev->vxlan_dst_port = 0;
+
+	if (edev->geneve_dst_port != geneve_port)
+		edev->geneve_dst_port = 0;
+}
+
 void qede_force_mac(void *dev, u8 *mac, bool forced)
 {
 	struct qede_dev *edev = dev;
@@ -883,69 +894,112 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
 void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
 {
 	struct qede_dev *edev = netdev_priv(dev);
+	struct qed_tunn_params tunn_params;
 	u16 t_port = ntohs(ti->port);
+	int rc;
+
+	memset(&tunn_params, 0, sizeof(tunn_params));
 
 	switch (ti->type) {
 	case UDP_TUNNEL_TYPE_VXLAN:
+		if (!edev->dev_info.common.vxlan_enable)
+			return;
+
 		if (edev->vxlan_dst_port)
 			return;
 
-		edev->vxlan_dst_port = t_port;
+		tunn_params.update_vxlan_port = 1;
+		tunn_params.vxlan_port = t_port;
+
+		__qede_lock(edev);
+		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+		__qede_unlock(edev);
 
-		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
-			   t_port);
+		if (!rc) {
+			edev->vxlan_dst_port = t_port;
+			DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
+				   t_port);
+		} else {
+			DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
+				  t_port);
+		}
 
-		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
 		break;
 	case UDP_TUNNEL_TYPE_GENEVE:
+		if (!edev->dev_info.common.geneve_enable)
+			return;
+
 		if (edev->geneve_dst_port)
 			return;
 
-		edev->geneve_dst_port = t_port;
+		tunn_params.update_geneve_port = 1;
+		tunn_params.geneve_port = t_port;
+
+		__qede_lock(edev);
+		rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+		__qede_unlock(edev);
 
-		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
-			   t_port);
-		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+		if (!rc) {
+			edev->geneve_dst_port = t_port;
+			DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "Added geneve port=%d\n", t_port);
+		} else {
+			DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
+				  t_port);
+		}
+
 		break;
 	default:
 		return;
 	}
-
-	schedule_delayed_work(&edev->sp_task, 0);
 }
 
-void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+void qede_udp_tunnel_del(struct net_device *dev,
+			 struct udp_tunnel_info *ti)
 {
 	struct qede_dev *edev = netdev_priv(dev);
+	struct qed_tunn_params tunn_params;
 	u16 t_port = ntohs(ti->port);
 
+	memset(&tunn_params, 0, sizeof(tunn_params));
+
 	switch (ti->type) {
 	case UDP_TUNNEL_TYPE_VXLAN:
 		if (t_port != edev->vxlan_dst_port)
 			return;
 
+		tunn_params.update_vxlan_port = 1;
+		tunn_params.vxlan_port = 0;
+
+		__qede_lock(edev);
+		edev->ops->tunn_config(edev->cdev, &tunn_params);
+		__qede_unlock(edev);
+
 		edev->vxlan_dst_port = 0;
 
 		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
 			   t_port);
 
-		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
 		break;
 	case UDP_TUNNEL_TYPE_GENEVE:
 		if (t_port != edev->geneve_dst_port)
 			return;
 
+		tunn_params.update_geneve_port = 1;
+		tunn_params.geneve_port = 0;
+
+		__qede_lock(edev);
+		edev->ops->tunn_config(edev->cdev, &tunn_params);
+		__qede_unlock(edev);
+
 		edev->geneve_dst_port = 0;
 
 		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
 			   t_port);
-		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
 		break;
 	default:
 		return;
 	}
-
-	schedule_delayed_work(&edev->sp_task, 0);
 }
 
 static void qede_xdp_reload_func(struct qede_dev *edev,
......
@@ -1697,14 +1697,25 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
 		}
 
 		/* Disable offloads for geneve tunnels, as HW can't parse
-		 * the geneve header which has option length greater than 32B.
+		 * the geneve header which has option length greater than 32b
+		 * and disable offloads for the ports which are not offloaded.
 		 */
-		if ((l4_proto == IPPROTO_UDP) &&
-		    ((skb_inner_mac_header(skb) -
-		      skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
+		if (l4_proto == IPPROTO_UDP) {
+			struct qede_dev *edev = netdev_priv(dev);
+			u16 hdrlen, vxln_port, gnv_port;
+
+			hdrlen = QEDE_MAX_TUN_HDR_LEN;
+			vxln_port = edev->vxlan_dst_port;
+			gnv_port = edev->geneve_dst_port;
+
+			if ((skb_inner_mac_header(skb) -
+			     skb_transport_header(skb)) > hdrlen ||
+			    (ntohs(udp_hdr(skb)->dest) != vxln_port &&
+			     ntohs(udp_hdr(skb)->dest) != gnv_port))
 				return features & ~(NETIF_F_CSUM_MASK |
 						    NETIF_F_GSO_MASK);
+		}
 	}
 
 	return features;
 }
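
After this change, TX offloads for a UDP-encapsulated frame are kept only if the inner headers fit the hardware limit and the outer destination port is one the device actually has offloaded. A stand-alone sketch of that predicate follows; the names and the fixed limit value are illustrative (the real limit is QEDE_MAX_TUN_HDR_LEN, the real ports come from edev->vxlan_dst_port / geneve_dst_port), so this is not the driver function itself.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy predicate mirroring the new qede_features_check() logic:
 * keep checksum/GSO offload only when the tunnel header fits and the
 * outer UDP destination port is an offloaded VXLAN/GENEVE port. */
static bool keep_tunnel_offload(uint16_t tun_hdr_len, uint16_t max_hdr_len,
                                uint16_t udp_dport,
                                uint16_t vxlan_port, uint16_t geneve_port)
{
    if (tun_hdr_len > max_hdr_len)
        return false;
    return udp_dport == vxlan_port || udp_dport == geneve_port;
}

int main(void)
{
    /* 4789/6081 are the conventional VXLAN/GENEVE ports used here only
     * as sample inputs. */
    printf("%d\n", keep_tunnel_offload(50, 80, 4789, 4789, 6081)); /* 1 */
    printf("%d\n", keep_tunnel_offload(50, 80, 7777, 4789, 6081)); /* 0 */
    return 0;
}
```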
@@ -231,6 +231,7 @@ static struct qed_eth_cb_ops qede_ll_ops = {
 		.link_update = qede_link_update,
 	},
 	.force_mac = qede_force_mac,
+	.ports_update = qede_udp_ports_update,
 };
 
 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -609,6 +610,7 @@ static void qede_init_ndev(struct qede_dev *edev)
 {
 	struct net_device *ndev = edev->ndev;
 	struct pci_dev *pdev = edev->pdev;
+	bool udp_tunnel_enable = false;
 	netdev_features_t hw_features;
 
 	pci_set_drvdata(pdev, ndev);
@@ -631,20 +633,33 @@ static void qede_init_ndev(struct qede_dev *edev)
 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		      NETIF_F_TSO | NETIF_F_TSO6;
 
-	/* Encap features*/
-	hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
-		       NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
-		       NETIF_F_GSO_GRE_CSUM;
-
 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
 		hw_features |= NETIF_F_NTUPLE;
 
-	ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-				NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
-				NETIF_F_TSO6 | NETIF_F_GSO_GRE |
-				NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
-				NETIF_F_GSO_UDP_TUNNEL_CSUM |
-				NETIF_F_GSO_GRE_CSUM;
+	if (edev->dev_info.common.vxlan_enable ||
+	    edev->dev_info.common.geneve_enable)
+		udp_tunnel_enable = true;
+
+	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
+		hw_features |= NETIF_F_TSO_ECN;
+		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+					NETIF_F_SG | NETIF_F_TSO |
+					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+					NETIF_F_RXCSUM;
+	}
+
+	if (udp_tunnel_enable) {
+		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
+				NETIF_F_GSO_UDP_TUNNEL_CSUM);
+		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
+					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
+	}
+
+	if (edev->dev_info.common.gre_enable) {
+		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
+		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
+					  NETIF_F_GSO_GRE_CSUM);
+	}
 
 	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
 			      NETIF_F_HIGHDMA;
@@ -782,7 +797,6 @@ static void qede_sp_task(struct work_struct *work)
 {
 	struct qede_dev *edev = container_of(work, struct qede_dev,
 					     sp_task.work);
-	struct qed_dev *cdev = edev->cdev;
 
 	__qede_lock(edev);
 
@@ -790,24 +804,6 @@ static void qede_sp_task(struct work_struct *work)
 	if (edev->state == QEDE_STATE_OPEN)
 		qede_config_rx_mode(edev->ndev);
 
-	if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
-		struct qed_tunn_params tunn_params;
-
-		memset(&tunn_params, 0, sizeof(tunn_params));
-		tunn_params.update_vxlan_port = 1;
-		tunn_params.vxlan_port = edev->vxlan_dst_port;
-		qed_ops->tunn_config(cdev, &tunn_params);
-	}
-
-	if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
-		struct qed_tunn_params tunn_params;
-
-		memset(&tunn_params, 0, sizeof(tunn_params));
-		tunn_params.update_geneve_port = 1;
-		tunn_params.geneve_port = edev->geneve_dst_port;
-		qed_ops->tunn_config(cdev, &tunn_params);
-	}
-
 #ifdef CONFIG_RFS_ACCEL
 	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
 		if (edev->state == QEDE_STATE_OPEN)
......
@@ -158,6 +158,7 @@ struct qed_tunn_params {
 struct qed_eth_cb_ops {
 	struct qed_common_cb_ops common;
 	void (*force_mac) (void *dev, u8 *mac, bool forced);
+	void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port);
 };
 
 #define QED_MAX_PHC_DRIFT_PPB	291666666
......
@@ -338,6 +338,11 @@ struct qed_dev_info {
 	bool wol_support;
 
 	enum qed_dev_type dev_type;
+
+	/* Output parameters for qede */
+	bool vxlan_enable;
+	bool gre_enable;
+	bool geneve_enable;
 };
 
 enum qed_sb_type {
......