Commit ed3849e4 authored by Jakub Kicinski

Merge branch 'sfc-vf-representors-for-ef100-rx-side'

Edward Cree says:

====================
sfc: VF representors for EF100 - RX side

This series adds the receive path for EF100 VF representors, plus other
minor features such as statistics.
====================

Link: https://lore.kernel.org/r/cover.1659034549.git.ecree.xilinx@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 71930846 7267aa6d
...@@ -8,7 +8,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \ ...@@ -8,7 +8,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100.o ef100_nic.o ef100_netdev.o \ ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o mae.o sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o
obj-$(CONFIG_SFC) += sfc.o obj-$(CONFIG_SFC) += sfc.o
......
...@@ -2538,23 +2538,33 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) ...@@ -2538,23 +2538,33 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
if (rc) if (rc)
return rc; return rc;
down_write(&efx->filter_sem);
rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807); rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
if (rc) if (rc)
return rc; goto out_unlock;
list_for_each_entry(vlan, &nic_data->vlan_list, list) { list_for_each_entry(vlan, &nic_data->vlan_list, list) {
rc = efx_mcdi_filter_add_vlan(efx, vlan->vid); rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
if (rc) if (rc)
goto fail_add_vlan; goto fail_add_vlan;
} }
return 0; goto out_unlock;
fail_add_vlan: fail_add_vlan:
efx_mcdi_filter_table_remove(efx); efx_mcdi_filter_table_remove(efx);
out_unlock:
up_write(&efx->filter_sem);
return rc; return rc;
} }
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
down_write(&efx->filter_sem);
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
}
/* This creates an entry in the RX descriptor queue */ /* This creates an entry in the RX descriptor queue */
static inline void static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
...@@ -3211,9 +3221,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) ...@@ -3211,9 +3221,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx); efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev); efx_net_stop(efx->net_dev);
down_write(&efx->filter_sem); efx_ef10_filter_table_remove(efx);
efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem);
rc = efx_ef10_vadaptor_free(efx, efx->vport_id); rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
if (rc) if (rc)
...@@ -3243,9 +3251,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) ...@@ -3243,9 +3251,7 @@ static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
if (rc2) if (rc2)
goto reset_nic; goto reset_nic;
restore_filters: restore_filters:
down_write(&efx->filter_sem);
rc2 = efx_ef10_filter_table_probe(efx); rc2 = efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
if (rc2) if (rc2)
goto reset_nic; goto reset_nic;
...@@ -3275,8 +3281,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) ...@@ -3275,8 +3281,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_net_stop(efx->net_dev); efx_net_stop(efx->net_dev);
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem); efx_ef10_filter_table_remove(efx);
efx_mcdi_filter_table_remove(efx);
ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
efx->net_dev->dev_addr); efx->net_dev->dev_addr);
...@@ -3286,7 +3291,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) ...@@ -3286,7 +3291,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
sizeof(inbuf), NULL, 0, NULL); sizeof(inbuf), NULL, 0, NULL);
efx_ef10_filter_table_probe(efx); efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
if (was_enabled) if (was_enabled)
...@@ -4092,7 +4096,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { ...@@ -4092,7 +4096,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate, .ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe, .filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore, .filter_table_restore = efx_mcdi_filter_table_restore,
.filter_table_remove = efx_mcdi_filter_table_remove, .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter, .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert, .filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe, .filter_remove_safe = efx_mcdi_filter_remove_safe,
......
...@@ -431,6 +431,9 @@ static void ef100_pci_remove(struct pci_dev *pci_dev) ...@@ -431,6 +431,9 @@ static void ef100_pci_remove(struct pci_dev *pci_dev)
probe_data = container_of(efx, struct efx_probe_data, efx); probe_data = container_of(efx, struct efx_probe_data, efx);
ef100_remove_netdev(probe_data); ef100_remove_netdev(probe_data);
#ifdef CONFIG_SFC_SRIOV
efx_fini_struct_tc(efx);
#endif
ef100_remove(efx); ef100_remove(efx);
efx_fini_io(efx); efx_fini_io(efx);
......
...@@ -329,6 +329,10 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data) ...@@ -329,6 +329,10 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)
ef100_unregister_netdev(efx); ef100_unregister_netdev(efx);
#ifdef CONFIG_SFC_SRIOV
efx_fini_tc(efx);
#endif
down_write(&efx->filter_sem); down_write(&efx->filter_sem);
efx_mcdi_filter_table_remove(efx); efx_mcdi_filter_table_remove(efx);
up_write(&efx->filter_sem); up_write(&efx->filter_sem);
......
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
#include "ef100_tx.h" #include "ef100_tx.h"
#include "ef100_sriov.h" #include "ef100_sriov.h"
#include "ef100_netdev.h" #include "ef100_netdev.h"
#include "tc.h"
#include "mae.h"
#include "rx_common.h" #include "rx_common.h"
#define EF100_MAX_VIS 4096 #define EF100_MAX_VIS 4096
...@@ -374,26 +376,46 @@ static int ef100_filter_table_up(struct efx_nic *efx) ...@@ -374,26 +376,46 @@ static int ef100_filter_table_up(struct efx_nic *efx)
{ {
int rc; int rc;
down_write(&efx->filter_sem);
rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC); rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
if (rc) { if (rc)
efx_mcdi_filter_table_down(efx); goto fail_unspec;
return rc;
}
rc = efx_mcdi_filter_add_vlan(efx, 0); rc = efx_mcdi_filter_add_vlan(efx, 0);
if (rc) { if (rc)
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC); goto fail_vlan0;
efx_mcdi_filter_table_down(efx); /* Drop the lock: we've finished altering table existence, and
} * filter insertion will need to take the lock for read.
*/
up_write(&efx->filter_sem);
#ifdef CONFIG_SFC_SRIOV
rc = efx_tc_insert_rep_filters(efx);
/* Rep filter failure is nonfatal */
if (rc)
netif_warn(efx, drv, efx->net_dev,
"Failed to insert representor filters, rc %d\n",
rc);
#endif
return 0;
fail_vlan0:
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
fail_unspec:
efx_mcdi_filter_table_down(efx);
up_write(&efx->filter_sem);
return rc; return rc;
} }
static void ef100_filter_table_down(struct efx_nic *efx) static void ef100_filter_table_down(struct efx_nic *efx)
{ {
#ifdef CONFIG_SFC_SRIOV
efx_tc_remove_rep_filters(efx);
#endif
down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0); efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC); efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
efx_mcdi_filter_table_down(efx); efx_mcdi_filter_table_down(efx);
up_write(&efx->filter_sem);
} }
/* Other /* Other
...@@ -704,6 +726,31 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx) ...@@ -704,6 +726,31 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G; return 10 * EFX_RECYCLE_RING_SIZE_10G;
} }
#ifdef CONFIG_SFC_SRIOV
static int efx_ef100_get_base_mport(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
u32 selector, id;
int rc;
/* Construct mport selector for "physical network port" */
efx_mae_mport_wire(efx, &selector);
/* Look up actual mport ID */
rc = efx_mae_lookup_mport(efx, selector, &id);
if (rc)
return rc;
/* The ID should always fit in 16 bits, because that's how wide the
* corresponding fields in the RX prefix & TX override descriptor are
*/
if (id >> 16)
netif_warn(efx, probe, efx->net_dev, "Bad base m-port id %#x\n",
id);
nic_data->base_mport = id;
nic_data->have_mport = true;
return 0;
}
#endif
static int compare_versions(const char *a, const char *b) static int compare_versions(const char *a, const char *b)
{ {
int a_major, a_minor, a_point, a_patch; int a_major, a_minor, a_point, a_patch;
...@@ -1064,6 +1111,34 @@ int ef100_probe_netdev_pf(struct efx_nic *efx) ...@@ -1064,6 +1111,34 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
eth_hw_addr_set(net_dev, net_dev->perm_addr); eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN); memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
if (!nic_data->grp_mae)
return 0;
#ifdef CONFIG_SFC_SRIOV
rc = efx_init_struct_tc(efx);
if (rc)
return rc;
rc = efx_ef100_get_base_mport(efx);
if (rc) {
netif_warn(efx, probe, net_dev,
"Failed to probe base mport rc %d; representors will not function\n",
rc);
}
rc = efx_init_tc(efx);
if (rc) {
/* Either we don't have an MAE at all (i.e. legacy v-switching),
* or we do but we failed to probe it. In the latter case, we
* may not have set up default rules, in which case we won't be
* able to pass any traffic. However, we don't fail the probe,
* because the user might need to use the netdevice to apply
* configuration changes to fix whatever's wrong with the MAE.
*/
netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
rc);
}
#endif
return 0; return 0;
fail: fail:
......
...@@ -72,6 +72,8 @@ struct ef100_nic_data { ...@@ -72,6 +72,8 @@ struct ef100_nic_data {
u8 port_id[ETH_ALEN]; u8 port_id[ETH_ALEN];
DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
u64 stats[EF100_STAT_COUNT]; u64 stats[EF100_STAT_COUNT];
u32 base_mport;
bool have_mport; /* base_mport was populated successfully */
bool grp_mae; /* MAE Privilege */ bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len; u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs; u16 tso_max_payload_num_segs;
......
...@@ -13,15 +13,24 @@ ...@@ -13,15 +13,24 @@
#include "ef100_netdev.h" #include "ef100_netdev.h"
#include "ef100_nic.h" #include "ef100_nic.h"
#include "mae.h" #include "mae.h"
#include "rx_common.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep" #define EFX_EF100_REP_DRIVER "efx_ef100_rep"
#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE 64
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv, static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
unsigned int i) unsigned int i)
{ {
efv->parent = efx; efv->parent = efx;
efv->idx = i; efv->idx = i;
INIT_LIST_HEAD(&efv->list); INIT_LIST_HEAD(&efv->list);
efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
INIT_LIST_HEAD(&efv->dflt.acts.list);
INIT_LIST_HEAD(&efv->rx_list);
spin_lock_init(&efv->rx_lock);
efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
...@@ -29,6 +38,25 @@ static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv, ...@@ -29,6 +38,25 @@ static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
return 0; return 0;
} }
static int efx_ef100_rep_open(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);
netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
NAPI_POLL_WEIGHT);
napi_enable(&efv->napi);
return 0;
}
static int efx_ef100_rep_close(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);
napi_disable(&efv->napi);
netif_napi_del(&efv->napi);
return 0;
}
static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb, static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
struct net_device *dev) struct net_device *dev)
{ {
...@@ -79,10 +107,26 @@ static int efx_ef100_rep_get_phys_port_name(struct net_device *dev, ...@@ -79,10 +107,26 @@ static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
return 0; return 0;
} }
static void efx_ef100_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct efx_rep *efv = netdev_priv(dev);
stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}
static const struct net_device_ops efx_ef100_rep_netdev_ops = { static const struct net_device_ops efx_ef100_rep_netdev_ops = {
.ndo_open = efx_ef100_rep_open,
.ndo_stop = efx_ef100_rep_close,
.ndo_start_xmit = efx_ef100_rep_xmit, .ndo_start_xmit = efx_ef100_rep_xmit,
.ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id, .ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
.ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name, .ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
.ndo_get_stats64 = efx_ef100_rep_get_stats64,
}; };
static void efx_ef100_rep_get_drvinfo(struct net_device *dev, static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
...@@ -106,10 +150,37 @@ static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev, ...@@ -106,10 +150,37 @@ static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
efv->msg_enable = msg_enable; efv->msg_enable = msg_enable;
} }
static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kring,
struct netlink_ext_ack *ext_ack)
{
struct efx_rep *efv = netdev_priv(net_dev);
ring->rx_max_pending = U32_MAX;
ring->rx_pending = efv->rx_pring_size;
}
static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kring,
struct netlink_ext_ack *ext_ack)
{
struct efx_rep *efv = netdev_priv(net_dev);
if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
return -EINVAL;
efv->rx_pring_size = ring->rx_pending;
return 0;
}
static const struct ethtool_ops efx_ef100_rep_ethtool_ops = { static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
.get_drvinfo = efx_ef100_rep_get_drvinfo, .get_drvinfo = efx_ef100_rep_get_drvinfo,
.get_msglevel = efx_ef100_rep_ethtool_get_msglevel, .get_msglevel = efx_ef100_rep_ethtool_get_msglevel,
.set_msglevel = efx_ef100_rep_ethtool_set_msglevel, .set_msglevel = efx_ef100_rep_ethtool_set_msglevel,
.get_ringparam = efx_ef100_rep_ethtool_get_ringparam,
.set_ringparam = efx_ef100_rep_ethtool_set_ringparam,
}; };
static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx, static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
...@@ -159,6 +230,7 @@ static int efx_ef100_configure_rep(struct efx_rep *efv) ...@@ -159,6 +230,7 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
u32 selector; u32 selector;
int rc; int rc;
efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
/* Construct mport selector for corresponding VF */ /* Construct mport selector for corresponding VF */
efx_mae_mport_vf(efx, efv->idx, &selector); efx_mae_mport_vf(efx, efv->idx, &selector);
/* Look up actual mport ID */ /* Look up actual mport ID */
...@@ -169,7 +241,14 @@ static int efx_ef100_configure_rep(struct efx_rep *efv) ...@@ -169,7 +241,14 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
/* mport label should fit in 16 bits */ /* mport label should fit in 16 bits */
WARN_ON(efv->mport >> 16); WARN_ON(efv->mport >> 16);
return 0; return efx_tc_configure_default_rule_rep(efv);
}
static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;
efx_tc_deconfigure_default_rule(efx, &efv->dflt);
} }
static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv) static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
...@@ -181,6 +260,7 @@ static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv) ...@@ -181,6 +260,7 @@ static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
list_del(&efv->list); list_del(&efv->list);
spin_unlock_bh(&efx->vf_reps_lock); spin_unlock_bh(&efx->vf_reps_lock);
rtnl_unlock(); rtnl_unlock();
synchronize_rcu();
free_netdev(efv->net_dev); free_netdev(efv->net_dev);
} }
...@@ -202,19 +282,21 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i) ...@@ -202,19 +282,21 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
pci_err(efx->pci_dev, pci_err(efx->pci_dev,
"Failed to configure representor for VF %d, rc %d\n", "Failed to configure representor for VF %d, rc %d\n",
i, rc); i, rc);
goto fail; goto fail1;
} }
rc = register_netdev(efv->net_dev); rc = register_netdev(efv->net_dev);
if (rc) { if (rc) {
pci_err(efx->pci_dev, pci_err(efx->pci_dev,
"Failed to register representor for VF %d, rc %d\n", "Failed to register representor for VF %d, rc %d\n",
i, rc); i, rc);
goto fail; goto fail2;
} }
pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i, pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
efv->net_dev->name); efv->net_dev->name);
return 0; return 0;
fail: fail2:
efx_ef100_deconfigure_rep(efv);
fail1:
efx_ef100_rep_destroy_netdev(efv); efx_ef100_rep_destroy_netdev(efv);
return rc; return rc;
} }
...@@ -228,6 +310,7 @@ void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv) ...@@ -228,6 +310,7 @@ void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
return; return;
netif_dbg(efx, drv, rep_dev, "Removing VF representor\n"); netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
unregister_netdev(rep_dev); unregister_netdev(rep_dev);
efx_ef100_deconfigure_rep(efv);
efx_ef100_rep_destroy_netdev(efv); efx_ef100_rep_destroy_netdev(efv);
} }
...@@ -242,3 +325,111 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx) ...@@ -242,3 +325,111 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx)
list_for_each_entry_safe(efv, next, &efx->vf_reps, list) list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
efx_ef100_vfrep_destroy(efx, efv); efx_ef100_vfrep_destroy(efx, efv);
} }
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
unsigned int read_index;
struct list_head head;
struct sk_buff *skb;
bool need_resched;
int spent = 0;
INIT_LIST_HEAD(&head);
/* Grab up to 'weight' pending SKBs */
spin_lock_bh(&efv->rx_lock);
read_index = efv->write_index;
while (spent < weight && !list_empty(&efv->rx_list)) {
skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
list_del(&skb->list);
list_add_tail(&skb->list, &head);
spent++;
}
spin_unlock_bh(&efv->rx_lock);
/* Receive them */
netif_receive_skb_list(&head);
if (spent < weight)
if (napi_complete_done(napi, spent)) {
spin_lock_bh(&efv->rx_lock);
efv->read_index = read_index;
/* If write_index advanced while we were doing the
* RX, then storing our read_index won't re-prime the
* fake-interrupt. In that case, we need to schedule
* NAPI again to consume the additional packet(s).
*/
need_resched = efv->write_index != read_index;
spin_unlock_bh(&efv->rx_lock);
if (need_resched)
napi_schedule(&efv->napi);
}
return spent;
}
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
u8 *eh = efx_rx_buf_va(rx_buf);
struct sk_buff *skb;
bool primed;
/* Don't allow too many queued SKBs to build up, as they consume
* GFP_ATOMIC memory. If we overrun, just start dropping.
*/
if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
atomic64_inc(&efv->stats.rx_dropped);
if (net_ratelimit())
netif_dbg(efv->parent, rx_err, efv->net_dev,
"nodesc-dropped packet of length %u\n",
rx_buf->len);
return;
}
skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
if (!skb) {
atomic64_inc(&efv->stats.rx_dropped);
if (net_ratelimit())
netif_dbg(efv->parent, rx_err, efv->net_dev,
"noskb-dropped packet of length %u\n",
rx_buf->len);
return;
}
memcpy(skb->data, eh, rx_buf->len);
__skb_put(skb, rx_buf->len);
skb_record_rx_queue(skb, 0); /* rep is single-queue */
/* Move past the ethernet header */
skb->protocol = eth_type_trans(skb, efv->net_dev);
skb_checksum_none_assert(skb);
atomic64_inc(&efv->stats.rx_packets);
atomic64_add(rx_buf->len, &efv->stats.rx_bytes);
/* Add it to the rx list */
spin_lock_bh(&efv->rx_lock);
primed = efv->read_index == efv->write_index;
list_add_tail(&skb->list, &efv->rx_list);
efv->write_index++;
spin_unlock_bh(&efv->rx_lock);
/* Trigger rx work */
if (primed)
napi_schedule(&efv->napi);
}
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
struct efx_rep *efv, *out = NULL;
/* spinlock guards against list mutation while we're walking it;
* but caller must also hold rcu_read_lock() to ensure the netdev
* isn't freed after we drop the spinlock.
*/
spin_lock_bh(&efx->vf_reps_lock);
list_for_each_entry(efv, &efx->vf_reps, list)
if (efv->mport == mport) {
out = efv;
break;
}
spin_unlock_bh(&efx->vf_reps_lock);
return out;
}
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#define EF100_REP_H #define EF100_REP_H
#include "net_driver.h" #include "net_driver.h"
#include "tc.h"
struct efx_rep_sw_stats { struct efx_rep_sw_stats {
atomic64_t rx_packets, tx_packets; atomic64_t rx_packets, tx_packets;
...@@ -29,7 +30,14 @@ struct efx_rep_sw_stats { ...@@ -29,7 +30,14 @@ struct efx_rep_sw_stats {
* @msg_enable: log message enable flags * @msg_enable: log message enable flags
* @mport: m-port ID of corresponding VF * @mport: m-port ID of corresponding VF
* @idx: VF index * @idx: VF index
* @write_index: number of packets enqueued to @rx_list
* @read_index: number of packets consumed from @rx_list
* @rx_pring_size: max length of RX list
* @dflt: default-rule for MAE switching
* @list: entry on efx->vf_reps * @list: entry on efx->vf_reps
* @rx_list: list of SKBs queued for receive in NAPI poll
* @rx_lock: protects @rx_list
* @napi: NAPI control structure
* @stats: software traffic counters for netdev stats * @stats: software traffic counters for netdev stats
*/ */
struct efx_rep { struct efx_rep {
...@@ -38,7 +46,13 @@ struct efx_rep { ...@@ -38,7 +46,13 @@ struct efx_rep {
u32 msg_enable; u32 msg_enable;
u32 mport; u32 mport;
unsigned int idx; unsigned int idx;
unsigned int write_index, read_index;
unsigned int rx_pring_size;
struct efx_tc_flow_rule dflt;
struct list_head list; struct list_head list;
struct list_head rx_list;
spinlock_t rx_lock;
struct napi_struct napi;
struct efx_rep_sw_stats stats; struct efx_rep_sw_stats stats;
}; };
...@@ -46,4 +60,10 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i); ...@@ -46,4 +60,10 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv); void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv);
void efx_ef100_fini_vfreps(struct efx_nic *efx); void efx_ef100_fini_vfreps(struct efx_nic *efx);
void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
/* Returns the representor corresponding to a VF m-port, or NULL
* @mport is an m-port label, *not* an m-port ID!
* Caller must hold rcu_read_lock().
*/
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
#endif /* EF100_REP_H */ #endif /* EF100_REP_H */
...@@ -55,10 +55,14 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix) ...@@ -55,10 +55,14 @@ static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
void __ef100_rx_packet(struct efx_channel *channel) void __ef100_rx_packet(struct efx_channel *channel)
{ {
struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue,
channel->rx_pkt_index);
struct efx_nic *efx = channel->efx; struct efx_nic *efx = channel->efx;
struct ef100_nic_data *nic_data;
u8 *eh = efx_rx_buf_va(rx_buf); u8 *eh = efx_rx_buf_va(rx_buf);
__wsum csum = 0; __wsum csum = 0;
u16 ing_port;
u32 *prefix; u32 *prefix;
prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN); prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
...@@ -76,6 +80,37 @@ void __ef100_rx_packet(struct efx_channel *channel) ...@@ -76,6 +80,37 @@ void __ef100_rx_packet(struct efx_channel *channel)
goto out; goto out;
} }
ing_port = le16_to_cpu((__force __le16) PREFIX_FIELD(prefix, INGRESS_MPORT));
nic_data = efx->nic_data;
if (nic_data->have_mport && ing_port != nic_data->base_mport) {
#ifdef CONFIG_SFC_SRIOV
struct efx_rep *efv;
rcu_read_lock();
efv = efx_ef100_find_rep_by_mport(efx, ing_port);
if (efv) {
if (efv->net_dev->flags & IFF_UP)
efx_ef100_rep_rx_packet(efv, rx_buf);
rcu_read_unlock();
/* Representor Rx doesn't care about PF Rx buffer
* ownership, it just makes a copy. So, we are done
* with the Rx buffer from PF point of view and should
* free it.
*/
goto free_rx_buffer;
}
rcu_read_unlock();
#endif
if (net_ratelimit())
netif_warn(efx, drv, efx->net_dev,
"Unrecognised ing_port %04x (base %04x), dropping\n",
ing_port, nic_data->base_mport);
channel->n_rx_mport_bad++;
goto free_rx_buffer;
}
if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) { if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) { if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
++channel->n_rx_ip_hdr_chksum_err; ++channel->n_rx_ip_hdr_chksum_err;
...@@ -87,17 +122,16 @@ void __ef100_rx_packet(struct efx_channel *channel) ...@@ -87,17 +122,16 @@ void __ef100_rx_packet(struct efx_channel *channel)
} }
if (channel->type->receive_skb) { if (channel->type->receive_skb) {
struct efx_rx_queue *rx_queue =
efx_channel_get_rx_queue(channel);
/* no support for special channels yet, so just discard */ /* no support for special channels yet, so just discard */
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
efx_free_rx_buffers(rx_queue, rx_buf, 1); goto free_rx_buffer;
goto out;
} }
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum); efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
goto out;
free_rx_buffer:
efx_free_rx_buffers(rx_queue, rx_buf, 1);
out: out:
channel->rx_pkt_n_frags = 0; channel->rx_pkt_n_frags = 0;
} }
......
...@@ -501,14 +501,11 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) ...@@ -501,14 +501,11 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
efx_device_detach_sync(vf->efx); efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev); efx_net_stop(vf->efx->net_dev);
down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx); vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED); rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
if (rc) { if (rc)
up_write(&vf->efx->filter_sem);
return rc; return rc;
}
} }
rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i); rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
...@@ -539,12 +536,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac) ...@@ -539,12 +536,9 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
if (vf->efx) { if (vf->efx) {
/* VF cannot use the vport_id that the PF created */ /* VF cannot use the vport_id that the PF created */
rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
if (rc) { if (rc)
up_write(&vf->efx->filter_sem);
return rc; return rc;
}
vf->efx->type->filter_table_probe(vf->efx); vf->efx->type->filter_table_probe(vf->efx);
up_write(&vf->efx->filter_sem);
efx_net_open(vf->efx->net_dev); efx_net_open(vf->efx->net_dev);
efx_device_attach_if_not_resetting(vf->efx); efx_device_attach_if_not_resetting(vf->efx);
} }
...@@ -580,7 +574,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, ...@@ -580,7 +574,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_net_stop(vf->efx->net_dev); efx_net_stop(vf->efx->net_dev);
mutex_lock(&vf->efx->mac_lock); mutex_lock(&vf->efx->mac_lock);
down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx); vf->efx->type->filter_table_remove(vf->efx);
rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED); rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
...@@ -654,7 +647,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, ...@@ -654,7 +647,6 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
if (rc2) if (rc2)
goto reset_nic_up_write; goto reset_nic_up_write;
up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock); mutex_unlock(&vf->efx->mac_lock);
rc2 = efx_net_open(vf->efx->net_dev); rc2 = efx_net_open(vf->efx->net_dev);
...@@ -666,10 +658,8 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, ...@@ -666,10 +658,8 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
return rc; return rc;
reset_nic_up_write: reset_nic_up_write:
if (vf->efx) { if (vf->efx)
up_write(&vf->efx->filter_sem);
mutex_unlock(&vf->efx->mac_lock); mutex_unlock(&vf->efx->mac_lock);
}
reset_nic: reset_nic:
if (vf->efx) { if (vf->efx) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
......
...@@ -91,6 +91,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { ...@@ -91,6 +91,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mport_bad),
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count), EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded), EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
......
...@@ -88,6 +88,7 @@ enum efx_filter_priority { ...@@ -88,6 +88,7 @@ enum efx_filter_priority {
* the automatic filter in its place. * the automatic filter in its place.
* @EFX_FILTER_FLAG_RX: Filter is for RX * @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX * @EFX_FILTER_FLAG_TX: Filter is for TX
* @EFX_FILTER_FLAG_VPORT_ID: Virtual port ID for adapter switching.
*/ */
enum efx_filter_flags { enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01, EFX_FILTER_FLAG_RX_RSS = 0x01,
...@@ -95,6 +96,7 @@ enum efx_filter_flags { ...@@ -95,6 +96,7 @@ enum efx_filter_flags {
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04, EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
EFX_FILTER_FLAG_RX = 0x08, EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10, EFX_FILTER_FLAG_TX = 0x10,
EFX_FILTER_FLAG_VPORT_ID = 0x20,
}; };
/** enum efx_encap_type - types of encapsulation /** enum efx_encap_type - types of encapsulation
...@@ -127,6 +129,9 @@ enum efx_encap_type { ...@@ -127,6 +129,9 @@ enum efx_encap_type {
* MCFW context_id. * MCFW context_id.
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
* an RX drop filter * an RX drop filter
* @vport_id: Virtual port ID associated with RX queue, for adapter switching,
* if %EFX_FILTER_FLAG_VPORT_ID is set. This is an MCFW vport_id, or on
* EF100 an mport selector.
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
...@@ -156,6 +161,7 @@ struct efx_filter_spec { ...@@ -156,6 +161,7 @@ struct efx_filter_spec {
u32 priority:2; u32 priority:2;
u32 flags:6; u32 flags:6;
u32 dmaq_id:12; u32 dmaq_id:12;
u32 vport_id;
u32 rss_context; u32 rss_context;
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */ __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
__be16 inner_vid; __be16 inner_vid;
...@@ -292,6 +298,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec) ...@@ -292,6 +298,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
return 0; return 0;
} }
/**
* efx_filter_set_vport_id - override virtual port id relating to filter
* @spec: Specification to initialise
* @vport_id: firmware ID of the virtual port
*/
static inline void efx_filter_set_vport_id(struct efx_filter_spec *spec,
u32 vport_id)
{
spec->flags |= EFX_FILTER_FLAG_VPORT_ID;
spec->vport_id = vport_id;
}
static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec, static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
enum efx_encap_type encap_type) enum efx_encap_type encap_type)
{ {
......
This diff is collapsed.
...@@ -14,9 +14,29 @@ ...@@ -14,9 +14,29 @@
/* MCDI interface for the ef100 Match-Action Engine */ /* MCDI interface for the ef100 Match-Action Engine */
#include "net_driver.h" #include "net_driver.h"
#include "tc.h"
#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label);
int efx_mae_free_mport(struct efx_nic *efx, u32 id);
void efx_mae_mport_wire(struct efx_nic *efx, u32 *out);
void efx_mae_mport_uplink(struct efx_nic *efx, u32 *out);
void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out); void efx_mae_mport_vf(struct efx_nic *efx, u32 vf_id, u32 *out);
void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id); int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
int efx_mae_alloc_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts);
int efx_mae_free_action_set_list(struct efx_nic *efx,
struct efx_tc_action_set_list *acts);
int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id);
int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
#endif /* EF100_MAE_H */ #endif /* EF100_MAE_H */
...@@ -205,6 +205,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); ...@@ -205,6 +205,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1))) ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \ #define _MCDI_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2)) ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
#define _MCDI_STRUCT_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
#define MCDI_BYTE(_buf, _field) \ #define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
...@@ -214,6 +216,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); ...@@ -214,6 +216,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field))) le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
#define MCDI_SET_DWORD(_buf, _field, _value) \ #define MCDI_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_STRUCT_SET_DWORD(_buf, _field, _value) \
EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \ #define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \ #define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
......
...@@ -221,7 +221,10 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx, ...@@ -221,7 +221,10 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf); efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
} }
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id); if (flags & EFX_FILTER_FLAG_VPORT_ID)
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, spec->vport_id);
else
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP : MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
...@@ -488,6 +491,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, ...@@ -488,6 +491,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
saved_spec->flags |= spec->flags; saved_spec->flags |= spec->flags;
saved_spec->rss_context = spec->rss_context; saved_spec->rss_context = spec->rss_context;
saved_spec->dmaq_id = spec->dmaq_id; saved_spec->dmaq_id = spec->dmaq_id;
saved_spec->vport_id = spec->vport_id;
} }
} else if (!replacing) { } else if (!replacing) {
kfree(saved_spec); kfree(saved_spec);
......
...@@ -89,6 +89,7 @@ struct efx_mcdi_filter_table { ...@@ -89,6 +89,7 @@ struct efx_mcdi_filter_table {
*/ */
bool mc_chaining; bool mc_chaining;
bool vlan_filter; bool vlan_filter;
/* Entries on the vlan_list are added/removed under filter_sem */
struct list_head vlan_list; struct list_head vlan_list;
}; };
......
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2019-2022 Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef MCDI_PCOL_MAE_H
#define MCDI_PCOL_MAE_H
/* MCDI definitions for Match-Action Engine functionality, that are
* missing from the main mcdi_pcol.h
*/
/* MC_CMD_MAE_COUNTER_LIST_ALLOC is not (yet) a released API, but the
* following value is needed as an argument to MC_CMD_MAE_ACTION_SET_ALLOC.
*/
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_LIST_ALLOC_OUT_COUNTER_LIST_ID_NULL 0xffffffff
#endif /* MCDI_PCOL_MAE_H */
...@@ -478,6 +478,8 @@ enum efx_sync_events_state { ...@@ -478,6 +478,8 @@ enum efx_sync_events_state {
* @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
* @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @n_rx_mport_bad: Count of RX packets dropped because their ingress mport was
* not recognised
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none * __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
...@@ -540,6 +542,7 @@ struct efx_channel { ...@@ -540,6 +542,7 @@ struct efx_channel {
unsigned int n_rx_xdp_bad_drops; unsigned int n_rx_xdp_bad_drops;
unsigned int n_rx_xdp_tx; unsigned int n_rx_xdp_tx;
unsigned int n_rx_xdp_redirect; unsigned int n_rx_xdp_redirect;
unsigned int n_rx_mport_bad;
unsigned int rx_pkt_n_frags; unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index; unsigned int rx_pkt_index;
...@@ -975,6 +978,7 @@ enum efx_xdp_tx_queues_mode { ...@@ -975,6 +978,7 @@ enum efx_xdp_tx_queues_mode {
* @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
* xdp_rxq_info structures? * xdp_rxq_info structures?
* @netdev_notifier: Netdevice notifier. * @netdev_notifier: Netdevice notifier.
* @tc: state for TC offload (EF100).
* @mem_bar: The BAR that is mapped into membase. * @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window. * @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem * @monitor_work: Hardware monitor workitem
...@@ -1158,6 +1162,7 @@ struct efx_nic { ...@@ -1158,6 +1162,7 @@ struct efx_nic {
bool xdp_rxq_info_failed; bool xdp_rxq_info_failed;
struct notifier_block netdev_notifier; struct notifier_block netdev_notifier;
struct efx_tc_state *tc;
unsigned int mem_bar; unsigned int mem_bar;
u32 reg_base; u32 reg_base;
......
...@@ -793,7 +793,6 @@ int efx_probe_filters(struct efx_nic *efx) ...@@ -793,7 +793,6 @@ int efx_probe_filters(struct efx_nic *efx)
int rc; int rc;
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx); rc = efx->type->filter_table_probe(efx);
if (rc) if (rc)
goto out_unlock; goto out_unlock;
...@@ -830,7 +829,6 @@ int efx_probe_filters(struct efx_nic *efx) ...@@ -830,7 +829,6 @@ int efx_probe_filters(struct efx_nic *efx)
} }
#endif #endif
out_unlock: out_unlock:
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
return rc; return rc;
} }
...@@ -846,9 +844,7 @@ void efx_remove_filters(struct efx_nic *efx) ...@@ -846,9 +844,7 @@ void efx_remove_filters(struct efx_nic *efx)
channel->rps_flow_id = NULL; channel->rps_flow_id = NULL;
} }
#endif #endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx); efx->type->filter_table_remove(efx);
up_write(&efx->filter_sem);
} }
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
......
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include "tc.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"
/* Free one action set, optionally releasing its firmware-side allocation.
 * Callers on failure paths pass in_hw=false for the action set they were
 * still building: had the alloc succeeded it would already be linked on an
 * acts.list rather than held separately.
 */
static void efx_tc_free_action_set(struct efx_nic *efx,
				   struct efx_tc_action_set *act, bool in_hw)
{
	if (!in_hw) {
		kfree(act);
		return;
	}
	efx_mae_free_action_set(efx, act->fw_id);
	/* An in_hw action set is always linked on an acts.list; unlink it
	 * before the memory backing the list node is freed.
	 */
	list_del(&act->list);
	kfree(act);
}
/* Free every action set on @acts and, if in_hw, the firmware-side list
 * object itself.  Failure paths pass in_hw=false because they usually fail
 * before efx_mae_alloc_action_set_list(); when they fail after it, they
 * call efx_mae_free_action_set_list() themselves first.
 */
static void efx_tc_free_action_set_list(struct efx_nic *efx,
					struct efx_tc_action_set_list *acts,
					bool in_hw)
{
	struct efx_tc_action_set *cur, *tmp;

	if (in_hw)
		efx_mae_free_action_set_list(efx, acts);
	/* Individual acts that made it onto the list are in_hw regardless
	 * of whether the list object itself was.
	 */
	list_for_each_entry_safe(cur, tmp, &acts->list, list)
		efx_tc_free_action_set(efx, cur, true);
	/* @acts is embedded in a struct efx_tc_flow_rule; never kfree it. */
}
/* Remove @rule from the MAE and release the action sets it references.
 * On return @rule->fw_id reads as "not installed".
 */
static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
{
	efx_mae_delete_rule(efx, rule->fw_id);
	/* Release entries in subsidiary tables */
	efx_tc_free_action_set_list(efx, &rule->acts, true);
	rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}
/* Install a default-switching rule at priority %EFX_TC_PRIO_DFLT: any
 * packet ingressing from @ing_port is delivered to @eg_port.  Builds the
 * single action set, wraps it in an action-set list, then inserts the
 * rule, unwinding each allocation in reverse order on failure.
 */
static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
					 u32 eg_port, struct efx_tc_flow_rule *rule)
{
	struct efx_tc_action_set_list *acts = &rule->acts;
	struct efx_tc_match *match = &rule->match;
	struct efx_tc_action_set *act;
	int rc;

	/* Match solely on the ingress m-port (fully-masked field). */
	match->value.ingress_port = ing_port;
	match->mask.ingress_port = ~0;
	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (!act)
		return -ENOMEM;
	/* Single action: deliver to the egress m-port. */
	act->deliver = 1;
	act->dest_mport = eg_port;
	rc = efx_mae_alloc_action_set(efx, act);
	if (rc)
		goto fail1;
	/* A default rule's list should hold exactly this one action set. */
	EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
	list_add_tail(&act->list, &acts->list);
	rc = efx_mae_alloc_action_set_list(efx, acts);
	if (rc)
		goto fail2;
	rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
				 acts->fw_id, &rule->fw_id);
	if (rc)
		goto fail3;
	return 0;
fail3:
	efx_mae_free_action_set_list(efx, acts);
fail2:
	list_del(&act->list);
	efx_mae_free_action_set(efx, act->fw_id);
fail1:
	kfree(act);
	return rc;
}
/* Default rule for the PF: traffic ingressing from the PF's uplink
 * m-port egresses to the wire.
 */
static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
{
	u32 from_pf, to_wire;

	efx_mae_mport_uplink(efx, &from_pf);
	efx_mae_mport_wire(efx, &to_wire);
	return efx_tc_configure_default_rule(efx, from_pf, to_wire,
					     &efx->tc->dflt.pf);
}
/* Default rule for the wire: traffic ingressing from the wire m-port
 * egresses to the PF's uplink.
 */
static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
{
	u32 from_wire, to_pf;

	efx_mae_mport_wire(efx, &from_wire);
	efx_mae_mport_uplink(efx, &to_pf);
	return efx_tc_configure_default_rule(efx, from_wire, to_pf,
					     &efx->tc->dflt.wire);
}
/* Default rule for a representor: traffic ingressing from the
 * representee's m-port egresses to the shared representor RX m-port.
 */
int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;
	u32 from_rep, to_reps_rx;

	efx_mae_mport_mport(efx, efv->mport, &from_rep);
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &to_reps_rx);
	return efx_tc_configure_default_rule(efx, from_rep, to_reps_rx,
					     &efv->dflt);
}
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
struct efx_tc_flow_rule *rule)
{
if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
efx_tc_delete_rule(efx, rule);
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}
/* Allocate the MAE m-port used to receive traffic for all representors,
 * and derive the vport ID that representor RX filters will target.
 */
static int efx_tc_configure_rep_mport(struct efx_nic *efx)
{
	u32 label;
	int rc;

	rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &label);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
		efx->tc->reps_mport_id, label);
	/* The vport ID is the m-port *selector*, not the m-port ID. */
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
			    &efx->tc->reps_mport_vport_id);
	return 0;
}
/* Release the representor RX m-port and mark it as no longer allocated. */
static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
{
	efx_mae_free_mport(efx, efx->tc->reps_mport_id);
	efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
}
int efx_tc_insert_rep_filters(struct efx_nic *efx)
{
struct efx_filter_spec promisc, allmulti;
int rc;
if (efx->type->is_vf)
return 0;
if (!efx->tc)
return 0;
efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
efx_filter_set_uc_def(&promisc);
efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
rc = efx_filter_insert_filter(efx, &promisc, false);
if (rc < 0)
return rc;
efx->tc->reps_filter_uc = rc;
efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
efx_filter_set_mc_def(&allmulti);
efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
rc = efx_filter_insert_filter(efx, &allmulti, false);
if (rc < 0)
return rc;
efx->tc->reps_filter_mc = rc;
return 0;
}
/* Remove the representor RX filters, if installed, and reset the stored
 * IDs to the -1 "not installed" sentinel.
 */
void efx_tc_remove_rep_filters(struct efx_nic *efx)
{
	if (efx->type->is_vf || !efx->tc)
		return;

	if (efx->tc->reps_filter_mc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  efx->tc->reps_filter_mc);
	efx->tc->reps_filter_mc = -1;

	if (efx->tc->reps_filter_uc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
					  efx->tc->reps_filter_uc);
	efx->tc->reps_filter_uc = -1;
}
/* Bring up TC offload state in hardware: the PF and wire default rules,
 * then the representor RX m-port.  Stops at the first failure.
 */
int efx_init_tc(struct efx_nic *efx)
{
	int err;

	err = efx_tc_configure_default_rule_pf(efx);
	if (!err)
		err = efx_tc_configure_default_rule_wire(efx);
	if (!err)
		err = efx_tc_configure_rep_mport(efx);
	return err;
}
/* Tear down what efx_init_tc() set up.  Safe after a partial init:
 * efx_tc_deconfigure_default_rule() skips rules that were never
 * installed.
 */
void efx_fini_tc(struct efx_nic *efx)
{
	/* We can get called even if efx_init_struct_tc() failed */
	if (!efx->tc)
		return;
	efx_tc_deconfigure_rep_mport(efx);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
}
int efx_init_struct_tc(struct efx_nic *efx)
{
if (efx->type->is_vf)
return 0;
efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
if (!efx->tc)
return -ENOMEM;
efx->tc->reps_filter_uc = -1;
efx->tc->reps_filter_mc = -1;
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
return 0;
}
/* Free the software TC state.  The default rules should already have been
 * removed from hardware (by efx_fini_tc()); warn if their fw_ids suggest
 * otherwise.
 */
void efx_fini_struct_tc(struct efx_nic *efx)
{
	if (!efx->tc)
		return;
	EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	kfree(efx->tc);
	efx->tc = NULL;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
* Driver for Solarflare network controllers and boards
* Copyright 2019 Solarflare Communications Inc.
* Copyright 2020-2022 Xilinx Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#ifndef EFX_TC_H
#define EFX_TC_H
#include "net_driver.h"
/* One action set: the actions applied to a matching packet.  Currently
 * only a single "deliver to m-port" action is expressible.
 */
struct efx_tc_action_set {
	u16 deliver:1;		/* deliver the packet to @dest_mport */
	u32 dest_mport;		/* m-port selector to deliver to, if @deliver */
	u32 fw_id; /* index of this entry in firmware actions table */
	struct list_head list;	/* entry on an efx_tc_action_set_list */
};
/* Packet fields a rule can match on (used for both value and mask). */
struct efx_tc_match_fields {
	/* L1 */
	u32 ingress_port;	/* m-port the packet arrived from */
};
/* A match criterion: packet matches when (field & mask) == (value & mask)
 * for every field.
 */
struct efx_tc_match {
	struct efx_tc_match_fields value;
	struct efx_tc_match_fields mask;
};
/* An ordered list of action sets, with its firmware-side handle. */
struct efx_tc_action_set_list {
	struct list_head list;	/* head of list of efx_tc_action_set */
	u32 fw_id;		/* firmware ID of the action-set list */
};
/* A match-action rule: match criteria plus the action-set list to run
 * on matching packets.
 */
struct efx_tc_flow_rule {
	struct efx_tc_match match;
	struct efx_tc_action_set_list acts;
	u32 fw_id;		/* firmware ID of the inserted rule */
};
/* Rule priorities passed to efx_mae_insert_rule(). */
enum efx_tc_rule_prios {
	EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
	EFX_TC_PRIO__NUM
};
/**
 * struct efx_tc_state - control plane data for TC offload
 *
 * @reps_mport_id: MAE port allocated for representor RX
 * @reps_filter_uc: VNIC filter ID for representor unicast RX (promisc),
 *	or -1 if not installed
 * @reps_filter_mc: VNIC filter ID for representor multicast RX (allmulti),
 *	or -1 if not installed
 * @reps_mport_vport_id: vport_id for representor RX filters; derived from
 *	the m-port *selector* of @reps_mport_id
 * @dflt: Match-action rules for default switching; at priority
 *	%EFX_TC_PRIO_DFLT. Named by *ingress* port
 * @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
 * @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
 */
struct efx_tc_state {
	u32 reps_mport_id, reps_mport_vport_id;
	s32 reps_filter_uc, reps_filter_mc;
	struct {
		struct efx_tc_flow_rule pf;
		struct efx_tc_flow_rule wire;
	} dflt;
};
struct efx_rep;
int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
struct efx_tc_flow_rule *rule);
int efx_tc_insert_rep_filters(struct efx_nic *efx);
void efx_tc_remove_rep_filters(struct efx_nic *efx);
int efx_init_tc(struct efx_nic *efx);
void efx_fini_tc(struct efx_nic *efx);
int efx_init_struct_tc(struct efx_nic *efx);
void efx_fini_struct_tc(struct efx_nic *efx);
#endif /* EFX_TC_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment