Commit 45cc3a0c authored by David S. Miller's avatar David S. Miller

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
More refactoring and cleanup, particularly around filter management.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 35fdb94b b766630b
sfc-y += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \ sfc-y += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
filter.o \
selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \ tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o mcdi.o mcdi_port.o mcdi_mon.o ptp.o
......
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#include <linux/ip.h> #include <linux/ip.h>
#include <linux/tcp.h> #include <linux/tcp.h>
#include <linux/in.h> #include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/topology.h> #include <linux/topology.h>
#include <linux/gfp.h> #include <linux/gfp.h>
...@@ -339,6 +338,7 @@ static void efx_init_eventq(struct efx_channel *channel) ...@@ -339,6 +338,7 @@ static void efx_init_eventq(struct efx_channel *channel)
channel->eventq_read_ptr = 0; channel->eventq_read_ptr = 0;
efx_nic_init_eventq(channel); efx_nic_init_eventq(channel);
channel->eventq_init = true;
} }
/* Enable event queue processing and NAPI */ /* Enable event queue processing and NAPI */
...@@ -367,10 +367,14 @@ static void efx_stop_eventq(struct efx_channel *channel) ...@@ -367,10 +367,14 @@ static void efx_stop_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel) static void efx_fini_eventq(struct efx_channel *channel)
{ {
if (!channel->eventq_init)
return;
netif_dbg(channel->efx, drv, channel->efx->net_dev, netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel); "chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel); efx_nic_fini_eventq(channel);
channel->eventq_init = false;
} }
static void efx_remove_eventq(struct efx_channel *channel) static void efx_remove_eventq(struct efx_channel *channel)
...@@ -606,7 +610,7 @@ static void efx_start_datapath(struct efx_nic *efx) ...@@ -606,7 +610,7 @@ static void efx_start_datapath(struct efx_nic *efx)
/* RX filters also have scatter-enabled flags */ /* RX filters also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter) if (efx->rx_scatter != old_rx_scatter)
efx_filter_update_rx_scatter(efx); efx->type->filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty. /* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly * We could avoid this when the queue size does not exactly
...@@ -871,10 +875,9 @@ void efx_link_status_changed(struct efx_nic *efx) ...@@ -871,10 +875,9 @@ void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */ /* Status message for kernel log */
if (link_state->up) if (link_state->up)
netif_info(efx, link, efx->net_dev, netif_info(efx, link, efx->net_dev,
"link up at %uMbps %s-duplex (MTU %d)%s\n", "link up at %uMbps %s-duplex (MTU %d)\n",
link_state->speed, link_state->fd ? "full" : "half", link_state->speed, link_state->fd ? "full" : "half",
efx->net_dev->mtu, efx->net_dev->mtu);
(efx->promiscuous ? " [PROMISC]" : ""));
else else
netif_info(efx, link, efx->net_dev, "link down\n"); netif_info(efx, link, efx->net_dev, "link down\n");
} }
...@@ -923,10 +926,6 @@ int __efx_reconfigure_port(struct efx_nic *efx) ...@@ -923,10 +926,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
WARN_ON(!mutex_is_locked(&efx->mac_lock)); WARN_ON(!mutex_is_locked(&efx->mac_lock));
/* Serialise the promiscuous flag with efx_set_rx_mode. */
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
/* Disable PHY transmit in mac level loopbacks */ /* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode; phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx)) if (LOOPBACK_INTERNAL(efx))
...@@ -1084,6 +1083,7 @@ static int efx_init_io(struct efx_nic *efx) ...@@ -1084,6 +1083,7 @@ static int efx_init_io(struct efx_nic *efx)
{ {
struct pci_dev *pci_dev = efx->pci_dev; struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask; dma_addr_t dma_mask = efx->type->max_dma_mask;
unsigned int mem_map_size = efx->type->mem_map_size(efx);
int rc; int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
...@@ -1136,20 +1136,18 @@ static int efx_init_io(struct efx_nic *efx) ...@@ -1136,20 +1136,18 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO; rc = -EIO;
goto fail3; goto fail3;
} }
efx->membase = ioremap_nocache(efx->membase_phys, efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
efx->type->mem_map_size);
if (!efx->membase) { if (!efx->membase) {
netif_err(efx, probe, efx->net_dev, netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n", "could not map memory BAR at %llx+%x\n",
(unsigned long long)efx->membase_phys, (unsigned long long)efx->membase_phys, mem_map_size);
efx->type->mem_map_size);
rc = -ENOMEM; rc = -ENOMEM;
goto fail4; goto fail4;
} }
netif_dbg(efx, probe, efx->net_dev, netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n", "memory BAR at %llx+%x (virtual %p)\n",
(unsigned long long)efx->membase_phys, (unsigned long long)efx->membase_phys, mem_map_size,
efx->type->mem_map_size, efx->membase); efx->membase);
return 0; return 0;
...@@ -1228,8 +1226,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) ...@@ -1228,8 +1226,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
*/ */
static int efx_probe_interrupts(struct efx_nic *efx) static int efx_probe_interrupts(struct efx_nic *efx)
{ {
unsigned int max_channels =
min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
unsigned int extra_channels = 0; unsigned int extra_channels = 0;
unsigned int i, j; unsigned int i, j;
int rc; int rc;
...@@ -1246,7 +1242,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) ...@@ -1246,7 +1242,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
if (separate_tx_channels) if (separate_tx_channels)
n_channels *= 2; n_channels *= 2;
n_channels += extra_channels; n_channels += extra_channels;
n_channels = min(n_channels, max_channels); n_channels = min(n_channels, efx->max_channels);
for (i = 0; i < n_channels; i++) for (i = 0; i < n_channels; i++)
xentries[i].entry = i; xentries[i].entry = i;
...@@ -1497,6 +1493,44 @@ static void efx_remove_nic(struct efx_nic *efx) ...@@ -1497,6 +1493,44 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx); efx->type->remove(efx);
} }
static int efx_probe_filters(struct efx_nic *efx)
{
int rc;
spin_lock_init(&efx->filter_lock);
rc = efx->type->filter_table_probe(efx);
if (rc)
return rc;
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
sizeof(*efx->rps_flow_id),
GFP_KERNEL);
if (!efx->rps_flow_id) {
efx->type->filter_table_remove(efx);
return -ENOMEM;
}
}
#endif
return 0;
}
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_flow_id);
#endif
efx->type->filter_table_remove(efx);
}
static void efx_restore_filters(struct efx_nic *efx)
{
efx->type->filter_table_restore(efx);
}
/************************************************************************** /**************************************************************************
* *
* NIC startup/shutdown * NIC startup/shutdown
...@@ -1987,30 +2021,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) ...@@ -1987,30 +2021,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_rx_mode(struct net_device *net_dev) static void efx_set_rx_mode(struct net_device *net_dev)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
/* Build multicast hash table */
if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
netdev_for_each_mc_addr(ha, net_dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
__set_bit_le(bit, mc_hash);
}
/* Broadcast packets go through the multicast hash filter.
* ether_crc_le() of the broadcast address is 0xbe2612ff
* so we always add bit 0xff to the mask.
*/
__set_bit_le(0xff, mc_hash);
}
if (efx->port_enabled) if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work); queue_work(efx->workqueue, &efx->mac_work);
...@@ -2489,8 +2499,6 @@ static int efx_init_struct(struct efx_nic *efx, ...@@ -2489,8 +2499,6 @@ static int efx_init_struct(struct efx_nic *efx,
efx->msi_context[i].index = i; efx->msi_context[i].index = i;
} }
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */ /* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode, efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode); interrupt_mode);
......
...@@ -68,27 +68,92 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); ...@@ -68,27 +68,92 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */ /* Filters */
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx); /**
extern void efx_remove_filters(struct efx_nic *efx); * efx_filter_insert_filter - add or replace a filter
extern void efx_filter_update_rx_scatter(struct efx_nic *efx); * @efx: NIC in which to insert the filter
extern s32 efx_filter_insert_filter(struct efx_nic *efx, * @spec: Specification for the filter
struct efx_filter_spec *spec, * @replace_equal: Flag for whether the specified filter may replace an
bool replace); * existing filter with equal priority
extern int efx_filter_remove_id_safe(struct efx_nic *efx, *
enum efx_filter_priority priority, * On success, return the filter ID.
u32 filter_id); * On failure, return a negative error code.
extern int efx_filter_get_filter_safe(struct efx_nic *efx, *
enum efx_filter_priority priority, * If an existing filter has equal match values to the new filter
u32 filter_id, struct efx_filter_spec *); * spec, then the new filter might replace it, depending on the
extern void efx_filter_clear_rx(struct efx_nic *efx, * relative priorities. If the existing filter has lower priority, or
enum efx_filter_priority priority); * if @replace_equal is set and it has equal priority, then it is
extern u32 efx_filter_count_rx_used(struct efx_nic *efx, * replaced. Otherwise the function fails, returning -%EPERM if
enum efx_filter_priority priority); * the existing filter has higher priority or -%EEXIST if it has
extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx); * equal priority.
extern s32 efx_filter_get_rx_ids(struct efx_nic *efx, */
enum efx_filter_priority priority, static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
u32 *buf, u32 size); struct efx_filter_spec *spec,
bool replace_equal)
{
return efx->type->filter_insert(efx, spec, replace_equal);
}
/**
* efx_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
return efx->type->filter_remove_safe(efx, priority, filter_id);
}
/**
* efx_filter_get_filter_safe - retrieve a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
* @spec: Buffer in which to store filter specification
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
static inline int
efx_filter_get_filter_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec)
{
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
}
/**
* efx_farch_filter_clear_rx - remove RX filters by priority
* @efx: NIC from which to remove the filters
* @priority: Maximum priority to remove
*/
static inline void efx_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
return efx->type->filter_clear_rx(efx, priority);
}
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
return efx->type->filter_count_rx_used(efx, priority);
}
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
return efx->type->filter_get_rx_id_limit(efx);
}
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size)
{
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
}
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id); u16 rxq_index, u32 flow_id);
......
...@@ -799,11 +799,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) ...@@ -799,11 +799,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc); return efx_reset(efx, rc);
} }
/* MAC address mask including only MC flag */ /* MAC address mask including only I/G bit */
static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
#define IP4_ADDR_FULL_MASK ((__force __be32)~0) #define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0) #define PORT_FULL_MASK ((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
static int efx_ethtool_get_class_rule(struct efx_nic *efx, static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule) struct ethtool_rx_flow_spec *rule)
...@@ -813,8 +814,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, ...@@ -813,8 +814,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec; struct efx_filter_spec spec;
u16 vid;
u8 proto;
int rc; int rc;
rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
...@@ -822,44 +821,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, ...@@ -822,44 +821,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
if (rc) if (rc)
return rc; return rc;
if (spec.dmaq_id == 0xfff) if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
rule->ring_cookie = RX_CLS_FLOW_DISC; rule->ring_cookie = RX_CLS_FLOW_DISC;
else else
rule->ring_cookie = spec.dmaq_id; rule->ring_cookie = spec.dmaq_id;
if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) { if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
rule->flow_type = ETHER_FLOW; spec.ether_type == htons(ETH_P_IP) &&
memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN); (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
if (spec.type == EFX_FILTER_MC_DEF) (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN); !(spec.match_flags &
return 0; ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
} EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
EFX_FILTER_MATCH_IP_PROTO |
rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest); EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
if (rc == 0) { rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
TCP_V4_FLOW : UDP_V4_FLOW);
if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
ip_entry->ip4dst = spec.loc_host[0];
ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
}
if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
ip_entry->ip4src = spec.rem_host[0];
ip_mask->ip4src = IP4_ADDR_FULL_MASK;
}
if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
ip_entry->pdst = spec.loc_port;
ip_mask->pdst = PORT_FULL_MASK;
}
if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
ip_entry->psrc = spec.rem_port;
ip_mask->psrc = PORT_FULL_MASK;
}
} else if (!(spec.match_flags &
~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_OUTER_VID))) {
rule->flow_type = ETHER_FLOW; rule->flow_type = ETHER_FLOW;
memset(mac_mask->h_dest, ~0, ETH_ALEN); if (spec.match_flags &
if (vid != EFX_FILTER_VID_UNSPEC) { (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
rule->flow_type |= FLOW_EXT; memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
rule->h_ext.vlan_tci = htons(vid); if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
rule->m_ext.vlan_tci = htons(0xfff); memset(mac_mask->h_dest, ~0, ETH_ALEN);
else
memcpy(mac_mask->h_dest, mac_addr_ig_mask,
ETH_ALEN);
} }
return 0; if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
memset(mac_mask->h_source, ~0, ETH_ALEN);
}
if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
mac_entry->h_proto = spec.ether_type;
mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
}
} else {
/* The above should handle all filters that we insert */
WARN_ON(1);
return -EINVAL;
} }
rc = efx_filter_get_ipv4_local(&spec, &proto, if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
&ip_entry->ip4dst, &ip_entry->pdst); rule->flow_type |= FLOW_EXT;
if (rc != 0) { rule->h_ext.vlan_tci = spec.outer_vid;
rc = efx_filter_get_ipv4_full( rule->m_ext.vlan_tci = htons(0xfff);
&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
&ip_entry->ip4src, &ip_entry->psrc);
EFX_WARN_ON_PARANOID(rc);
ip_mask->ip4src = IP4_ADDR_FULL_MASK;
ip_mask->psrc = PORT_FULL_MASK;
} }
rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
ip_mask->pdst = PORT_FULL_MASK;
return rc; return rc;
} }
...@@ -967,82 +994,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, ...@@ -967,82 +994,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ? (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
0xfff : rule->ring_cookie); EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
switch (rule->flow_type) { switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW: case TCP_V4_FLOW:
case UDP_V4_FLOW: { case UDP_V4_FLOW:
u8 proto = (rule->flow_type == TCP_V4_FLOW ? spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
IPPROTO_TCP : IPPROTO_UDP); EFX_FILTER_MATCH_IP_PROTO);
spec.ether_type = htons(ETH_P_IP);
/* Must match all of destination, */ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK && IPPROTO_TCP : IPPROTO_UDP);
ip_mask->pdst == PORT_FULL_MASK)) if (ip_mask->ip4dst) {
return -EINVAL; if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
/* all or none of source, */ return -EINVAL;
if ((ip_mask->ip4src || ip_mask->psrc) && spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
!(ip_mask->ip4src == IP4_ADDR_FULL_MASK && spec.loc_host[0] = ip_entry->ip4dst;
ip_mask->psrc == PORT_FULL_MASK)) }
return -EINVAL; if (ip_mask->ip4src) {
/* and nothing else */ if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
if (ip_mask->tos || rule->m_ext.vlan_tci) return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
spec.rem_host[0] = ip_entry->ip4src;
}
if (ip_mask->pdst) {
if (ip_mask->pdst != PORT_FULL_MASK)
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
spec.loc_port = ip_entry->pdst;
}
if (ip_mask->psrc) {
if (ip_mask->psrc != PORT_FULL_MASK)
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
spec.rem_port = ip_entry->psrc;
}
if (ip_mask->tos)
return -EINVAL; return -EINVAL;
if (ip_mask->ip4src)
rc = efx_filter_set_ipv4_full(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst,
ip_entry->ip4src,
ip_entry->psrc);
else
rc = efx_filter_set_ipv4_local(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst);
if (rc)
return rc;
break; break;
}
case ETHER_FLOW | FLOW_EXT:
case ETHER_FLOW: {
u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
ntohs(rule->m_ext.vlan_tci) : 0);
/* Must not match on source address or Ethertype */ case ETHER_FLOW:
if (!is_zero_ether_addr(mac_mask->h_source) || if (!is_zero_ether_addr(mac_mask->h_dest)) {
mac_mask->h_proto) if (ether_addr_equal(mac_mask->h_dest,
return -EINVAL; mac_addr_ig_mask))
spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
/* Is it a default UC or MC filter? */ else if (is_broadcast_ether_addr(mac_mask->h_dest))
if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) && spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
vlan_tag_mask == 0) {
if (is_multicast_ether_addr(mac_entry->h_dest))
rc = efx_filter_set_mc_def(&spec);
else else
rc = efx_filter_set_uc_def(&spec); return -EINVAL;
memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
} }
/* Otherwise, it must match all of destination and all if (!is_zero_ether_addr(mac_mask->h_source)) {
* or none of VID. if (!is_broadcast_ether_addr(mac_mask->h_source))
*/ return -EINVAL;
else if (is_broadcast_ether_addr(mac_mask->h_dest) && spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
(vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) { memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
rc = efx_filter_set_eth_local( }
&spec, if (mac_mask->h_proto) {
vlan_tag_mask ? if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC, return -EINVAL;
mac_entry->h_dest); spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
} else { spec.ether_type = mac_entry->h_proto;
rc = -EINVAL;
} }
if (rc)
return rc;
break; break;
}
default: default:
return -EINVAL; return -EINVAL;
} }
if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
if (rule->m_ext.vlan_tci != htons(0xfff))
return -EINVAL;
spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
spec.outer_vid = rule->h_ext.vlan_tci;
}
rc = efx_filter_insert_filter(efx, &spec, true); rc = efx_filter_insert_filter(efx, &spec, true);
if (rc < 0) if (rc < 0)
return rc; return rc;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -128,6 +128,60 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); ...@@ -128,6 +128,60 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value) EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \ #define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0) EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1)
#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
_name2, _value2) \
EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2)
#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3) \
EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3)
#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4) \
EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4)
#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5) \
EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5)
#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5, \
_name6, _value6) \
EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5, \
MC_CMD_ ## _name6, _value6)
#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5, \
_name6, _value6, _name7, _value7) \
EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5, \
MC_CMD_ ## _name6, _value6, \
MC_CMD_ ## _name7, _value7)
#define MCDI_SET_QWORD(_buf, _field, _value) \ #define MCDI_SET_QWORD(_buf, _field, _value) \
do { \ do { \
EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
......
...@@ -861,7 +861,7 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) ...@@ -861,7 +861,7 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
int efx_mcdi_set_mac(struct efx_nic *efx) int efx_mcdi_set_mac(struct efx_nic *efx)
{ {
u32 reject, fcntl; u32 fcntl;
MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN); MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
...@@ -873,12 +873,9 @@ int efx_mcdi_set_mac(struct efx_nic *efx) ...@@ -873,12 +873,9 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
/* The MCDI command provides for controlling accept/reject /* Set simple MAC filter for Siena */
* of broadcast packets too, but the driver doesn't currently MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
* expose this. */ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
reject = (efx->promiscuous) ? 0 :
(1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
switch (efx->wanted_fc) { switch (efx->wanted_fc) {
case EFX_FC_RX | EFX_FC_TX: case EFX_FC_RX | EFX_FC_TX:
...@@ -926,21 +923,19 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, ...@@ -926,21 +923,19 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
{ {
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc; int rc;
efx_dword_t *cmd_ptr;
int period = enable ? 1000 : 0; int period = enable ? 1000 : 0;
BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
EFX_POPULATE_DWORD_7(*cmd_ptr, MAC_STATS_IN_DMA, !!enable,
MC_CMD_MAC_STATS_IN_DMA, !!enable, MAC_STATS_IN_CLEAR, clear,
MC_CMD_MAC_STATS_IN_CLEAR, clear, MAC_STATS_IN_PERIODIC_CHANGE, 1,
MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1, MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable, MAC_STATS_IN_PERIODIC_CLEAR, 0,
MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0, MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1, MAC_STATS_IN_PERIOD_MS, period);
MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
......
This diff is collapsed.
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include "enum.h" #include "enum.h"
#include "bitfield.h" #include "bitfield.h"
#include "filter.h"
/************************************************************************** /**************************************************************************
* *
...@@ -356,6 +357,7 @@ enum efx_rx_alloc_method { ...@@ -356,6 +357,7 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC * @efx: Associated Efx NIC
* @channel: Channel instance number * @channel: Channel instance number
* @type: Channel type definition * @type: Channel type definition
* @eventq_init: Event queue initialised flag
* @enabled: Channel enabled indicator * @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only) * @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks) * @irq_moderation: IRQ moderation value (in hardware ticks)
...@@ -387,6 +389,7 @@ struct efx_channel { ...@@ -387,6 +389,7 @@ struct efx_channel {
struct efx_nic *efx; struct efx_nic *efx;
int channel; int channel;
const struct efx_channel_type *type; const struct efx_channel_type *type;
bool eventq_init;
bool enabled; bool enabled;
int irq; int irq;
unsigned int irq_moderation; unsigned int irq_moderation;
...@@ -674,7 +677,6 @@ union efx_multicast_hash { ...@@ -674,7 +677,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
}; };
struct efx_filter_state;
struct efx_vf; struct efx_vf;
struct vfdi_status; struct vfdi_status;
...@@ -751,8 +753,10 @@ struct vfdi_status; ...@@ -751,8 +753,10 @@ struct vfdi_status;
* @link_advertising: Autonegotiation advertising flags * @link_advertising: Autonegotiation advertising flags
* @link_state: Current state of the link * @link_state: Current state of the link
* @n_link_state_changes: Number of times the link has changed state * @n_link_state_changes: Number of times the link has changed state
* @promiscuous: Promiscuous flag. Protected by netif_tx_lock. * @unicast_filter: Flag for Falcon-arch simple unicast filter.
* @multicast_hash: Multicast hash table * Protected by @mac_lock.
* @multicast_hash: Multicast hash table for Falcon-arch.
* Protected by @mac_lock.
* @wanted_fc: Wanted flow control flags * @wanted_fc: Wanted flow control flags
* @fc_disable: When non-zero flow control is disabled. Typically used to * @fc_disable: When non-zero flow control is disabled. Typically used to
* ensure that network back pressure doesn't delay dma queue flushes. * ensure that network back pressure doesn't delay dma queue flushes.
...@@ -761,6 +765,11 @@ struct vfdi_status; ...@@ -761,6 +765,11 @@ struct vfdi_status;
* @loopback_mode: Loopback status * @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask * @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state * @loopback_selftest: Offline self-test private state
* @filter_lock: Filter table lock
* @filter_state: Architecture-dependent filter table state
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
* @rps_expire_index: Next index to check for expiry in @rps_flow_id
* @drain_pending: Count of RX and TX queues that haven't been flushed and drained. * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed. * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called. * Decremented when the efx_flush_rx_queue() is called.
...@@ -832,6 +841,8 @@ struct efx_nic { ...@@ -832,6 +841,8 @@ struct efx_nic {
unsigned rx_dc_base; unsigned rx_dc_base;
unsigned sram_lim_qw; unsigned sram_lim_qw;
unsigned next_buffer_table; unsigned next_buffer_table;
unsigned int max_channels;
unsigned n_channels; unsigned n_channels;
unsigned n_rx_channels; unsigned n_rx_channels;
unsigned rss_spread; unsigned rss_spread;
...@@ -857,6 +868,7 @@ struct efx_nic { ...@@ -857,6 +868,7 @@ struct efx_nic {
struct delayed_work selftest_work; struct delayed_work selftest_work;
#ifdef CONFIG_SFC_MTD #ifdef CONFIG_SFC_MTD
const struct efx_mtd_ops *mtd_ops;
struct list_head mtd_list; struct list_head mtd_list;
#endif #endif
...@@ -883,7 +895,7 @@ struct efx_nic { ...@@ -883,7 +895,7 @@ struct efx_nic {
struct efx_link_state link_state; struct efx_link_state link_state;
unsigned int n_link_state_changes; unsigned int n_link_state_changes;
bool promiscuous; bool unicast_filter;
union efx_multicast_hash multicast_hash; union efx_multicast_hash multicast_hash;
u8 wanted_fc; u8 wanted_fc;
unsigned fc_disable; unsigned fc_disable;
...@@ -894,7 +906,12 @@ struct efx_nic { ...@@ -894,7 +906,12 @@ struct efx_nic {
void *loopback_selftest; void *loopback_selftest;
struct efx_filter_state *filter_state; spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
u32 *rps_flow_id;
unsigned int rps_expire_index;
#endif
atomic_t drain_pending; atomic_t drain_pending;
atomic_t rxq_flush_pending; atomic_t rxq_flush_pending;
...@@ -939,6 +956,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) ...@@ -939,6 +956,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
/** /**
* struct efx_nic_type - Efx device type definition * struct efx_nic_type - Efx device type definition
* @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller * @probe: Probe the controller
* @remove: Free resources allocated by probe() * @remove: Free resources allocated by probe()
* @init: Initialise the controller * @init: Initialise the controller
...@@ -1011,8 +1029,25 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) ...@@ -1011,8 +1029,25 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @ev_process: Process events for a queue, up to the given NAPI quota * @ev_process: Process events for a queue, up to the given NAPI quota
* @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
* @ev_test_generate: Generate a test event * @ev_test_generate: Generate a test event
* @filter_table_probe: Probe filter capabilities and set up filter software state
* @filter_table_restore: Restore filters removed from hardware
* @filter_table_remove: Remove filters from hardware and tear down software state
* @filter_update_rx_scatter: Update filters after change to rx scatter setting
* @filter_insert: add or replace a filter
* @filter_remove_safe: remove a filter by ID, carefully
* @filter_get_safe: retrieve a filter by ID, carefully
* @filter_clear_rx: remove RX filters by priority
* @filter_count_rx_used: Get the number of filters in use at a given priority
* @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
* @filter_get_rx_ids: Get list of RX filters at a given priority
* @filter_rfs_insert: Add or replace a filter for RFS. This must be
* atomic. The hardware change may be asynchronous but should
* not be delayed for long. It may fail if this can't be done
* atomically.
* @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
* This must check whether the specified table entry is used by RFS
* and that rps_may_expire_flow() returns true for it.
* @revision: Hardware architecture revision * @revision: Hardware architecture revision
* @mem_map_size: Memory BAR mapped size
* @txd_ptr_tbl_base: TX descriptor ring base address * @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address
* @buf_tbl_base: Buffer table base address * @buf_tbl_base: Buffer table base address
...@@ -1024,14 +1059,13 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) ...@@ -1024,14 +1059,13 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @can_rx_scatter: NIC is able to scatter packet to multiple buffers * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported * @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode. * from &enum efx_init_mode.
* @phys_addr_channels: Number of channels with physically addressed
* descriptors
* @timer_period_max: Maximum period of interrupt timer (in ticks) * @timer_period_max: Maximum period of interrupt timer (in ticks)
* @offload_features: net_device feature flags for protocol offload * @offload_features: net_device feature flags for protocol offload
* features implemented in hardware * features implemented in hardware
* @mcdi_max_ver: Maximum MCDI version supported * @mcdi_max_ver: Maximum MCDI version supported
*/ */
struct efx_nic_type { struct efx_nic_type {
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx); int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx); void (*remove)(struct efx_nic *efx);
int (*init)(struct efx_nic *efx); int (*init)(struct efx_nic *efx);
...@@ -1090,9 +1124,34 @@ struct efx_nic_type { ...@@ -1090,9 +1124,34 @@ struct efx_nic_type {
int (*ev_process)(struct efx_channel *channel, int quota); int (*ev_process)(struct efx_channel *channel, int quota);
void (*ev_read_ack)(struct efx_channel *channel); void (*ev_read_ack)(struct efx_channel *channel);
void (*ev_test_generate)(struct efx_channel *channel); void (*ev_test_generate)(struct efx_channel *channel);
int (*filter_table_probe)(struct efx_nic *efx);
void (*filter_table_restore)(struct efx_nic *efx);
void (*filter_table_remove)(struct efx_nic *efx);
void (*filter_update_rx_scatter)(struct efx_nic *efx);
s32 (*filter_insert)(struct efx_nic *efx,
struct efx_filter_spec *spec, bool replace);
int (*filter_remove_safe)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id);
int (*filter_get_safe)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
void (*filter_clear_rx)(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 (*filter_count_rx_used)(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
s32 (*filter_get_rx_ids)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
s32 (*filter_rfs_insert)(struct efx_nic *efx,
struct efx_filter_spec *spec);
bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
unsigned int index);
#endif
int revision; int revision;
unsigned int mem_map_size;
unsigned int txd_ptr_tbl_base; unsigned int txd_ptr_tbl_base;
unsigned int rxd_ptr_tbl_base; unsigned int rxd_ptr_tbl_base;
unsigned int buf_tbl_base; unsigned int buf_tbl_base;
...@@ -1103,10 +1162,10 @@ struct efx_nic_type { ...@@ -1103,10 +1162,10 @@ struct efx_nic_type {
unsigned int rx_buffer_padding; unsigned int rx_buffer_padding;
bool can_rx_scatter; bool can_rx_scatter;
unsigned int max_interrupt_mode; unsigned int max_interrupt_mode;
unsigned int phys_addr_channels;
unsigned int timer_period_max; unsigned int timer_period_max;
netdev_features_t offload_features; netdev_features_t offload_features;
int mcdi_max_ver; int mcdi_max_ver;
unsigned int max_rx_ip_filters;
}; };
/************************************************************************** /**************************************************************************
......
...@@ -184,8 +184,8 @@ struct falcon_nic_data { ...@@ -184,8 +184,8 @@ struct falcon_nic_data {
bool stats_pending; bool stats_pending;
struct timer_list stats_timer; struct timer_list stats_timer;
u32 *stats_dma_done; u32 *stats_dma_done;
struct efx_spi_device spi_flash; struct falcon_spi_device spi_flash;
struct efx_spi_device spi_eeprom; struct falcon_spi_device spi_eeprom;
struct mutex spi_lock; struct mutex spi_lock;
struct mutex mdio_lock; struct mutex mdio_lock;
bool xmac_poll_required; bool xmac_poll_required;
...@@ -404,6 +404,35 @@ extern int efx_farch_ev_process(struct efx_channel *channel, int quota); ...@@ -404,6 +404,35 @@ extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
extern void efx_farch_ev_read_ack(struct efx_channel *channel); extern void efx_farch_ev_read_ack(struct efx_channel *channel);
extern void efx_farch_ev_test_generate(struct efx_channel *channel); extern void efx_farch_ev_test_generate(struct efx_channel *channel);
/* Falcon/Siena filter operations */
extern int efx_farch_filter_table_probe(struct efx_nic *efx);
extern void efx_farch_filter_table_restore(struct efx_nic *efx);
extern void efx_farch_filter_table_remove(struct efx_nic *efx);
extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_farch_filter_insert(struct efx_nic *efx,
struct efx_filter_spec *spec, bool replace);
extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id);
extern int efx_farch_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority);
extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
struct efx_filter_spec *spec);
extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int index);
#endif
extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
extern bool efx_nic_event_present(struct efx_channel *channel); extern bool efx_nic_event_present(struct efx_channel *channel);
/* Some statistics are computed as A - B where A and B each increase /* Some statistics are computed as A - B where A and B each increase
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <net/checksum.h> #include <net/checksum.h>
#include "net_driver.h" #include "net_driver.h"
#include "efx.h" #include "efx.h"
#include "filter.h"
#include "nic.h" #include "nic.h"
#include "selftest.h" #include "selftest.h"
#include "workarounds.h" #include "workarounds.h"
...@@ -802,3 +803,96 @@ module_param(rx_refill_threshold, uint, 0444); ...@@ -802,3 +803,96 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold, MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)"); "RX descriptor ring refill threshold (%)");
#ifdef CONFIG_RFS_ACCEL
/* efx_filter_rfs - ndo_rx_flow_steer handler: steer a flow to an RX queue
 * @net_dev:	net device the flow arrived on
 * @skb:	a packet of the flow (RFS has validated the IP header length)
 * @rxq_index:	RX queue the flow should be steered to
 * @flow_id:	RFS flow ID, recorded so the filter can be expired later
 *
 * Builds an IPv4 full-match filter spec from the packet's 5-tuple and
 * inserts it through the NIC-type filter_rfs_insert hook.
 * Returns the filter index (>= 0) on success or a negative error code.
 */
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const struct iphdr *ip;
	const __be16 *ports;
	int nhoff;
	int rc;

	nhoff = skb_network_offset(skb);

	if (skb->protocol == htons(ETH_P_8021Q)) {
		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
				    nhoff + sizeof(struct vlan_hdr));
		/* Bug fix: the cast must bind to (skb->data + nhoff).
		 * The previous form added nhoff scaled by
		 * sizeof(struct vlan_hdr), which was only harmless while
		 * nhoff happened to be zero here.
		 */
		if (((const struct vlan_hdr *)(skb->data + nhoff))->
		    h_vlan_encapsulated_proto != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;

		/* This is IP over 802.1q VLAN.  We can't filter on the
		 * IP 5-tuple and the vlan together, so just strip the
		 * vlan header and filter on the IP part.
		 */
		nhoff += sizeof(struct vlan_hdr);
	} else if (skb->protocol != htons(ETH_P_IP)) {
		return -EPROTONOSUPPORT;
	}

	/* RFS must validate the IP header length before calling us */
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
				      ip->daddr, ports[1], ip->saddr, ports[0]);
	if (rc)
		return rc;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	netif_info(efx, rx_status, efx->net_dev,
		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
		   rxq_index, flow_id, rc);

	return rc;
}
/* Scan up to @quota entries of the RFS flow-ID table, asking the
 * NIC-type filter_rfs_expire_one hook to remove each filter that is
 * no longer wanted (the hook checks the entry is an RFS filter and
 * that rps_may_expire_flow() agrees).  Returns false without scanning
 * if the filter lock is contended, so the caller can retry later.
 */
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
unsigned int index, size;
u32 flow_id;
/* Non-blocking: if another path holds the filter table lock,
 * skip this pass rather than stall.
 */
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
/* Resume the round-robin scan where the previous call stopped */
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
flow_id = efx->rps_flow_id[index];
if (expire_one(efx, flow_id, index))
netif_info(efx, rx_status, efx->net_dev,
"expired filter %d [flow %u]\n",
index, flow_id);
/* Wrap circularly at the end of the filter table */
if (++index == size)
index = 0;
}
/* Remember where to continue on the next call */
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
return true;
}
#endif /* CONFIG_RFS_ACCEL */
...@@ -187,6 +187,12 @@ static void siena_dimension_resources(struct efx_nic *efx) ...@@ -187,6 +187,12 @@ static void siena_dimension_resources(struct efx_nic *efx)
efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
} }
/* mem_map_size hook for Siena: the BAR mapping must extend far enough
 * to cover the MC shared-memory TREG window (step * rows past its base).
 */
static unsigned int siena_mem_map_size(struct efx_nic *efx)
{
	return FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS +
	       FR_CZ_MC_TREG_SMEM;
}
static int siena_probe_nic(struct efx_nic *efx) static int siena_probe_nic(struct efx_nic *efx)
{ {
struct siena_nic_data *nic_data; struct siena_nic_data *nic_data;
...@@ -207,6 +213,8 @@ static int siena_probe_nic(struct efx_nic *efx) ...@@ -207,6 +213,8 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail1; goto fail1;
} }
efx->max_channels = EFX_MAX_CHANNELS;
efx_reado(efx, &reg, FR_AZ_CS_DEBUG); efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
...@@ -495,6 +503,8 @@ static int siena_mac_reconfigure(struct efx_nic *efx) ...@@ -495,6 +503,8 @@ static int siena_mac_reconfigure(struct efx_nic *efx)
MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST + MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
sizeof(efx->multicast_hash)); sizeof(efx->multicast_hash));
efx_farch_filter_sync_rx_mode(efx);
WARN_ON(!mutex_is_locked(&efx->mac_lock)); WARN_ON(!mutex_is_locked(&efx->mac_lock));
rc = efx_mcdi_set_mac(efx); rc = efx_mcdi_set_mac(efx);
...@@ -670,6 +680,7 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx) ...@@ -670,6 +680,7 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx)
*/ */
const struct efx_nic_type siena_a0_nic_type = { const struct efx_nic_type siena_a0_nic_type = {
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic, .probe = siena_probe_nic,
.remove = siena_remove_nic, .remove = siena_remove_nic,
.init = siena_init_nic, .init = siena_init_nic,
...@@ -727,10 +738,23 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -727,10 +738,23 @@ const struct efx_nic_type siena_a0_nic_type = {
.ev_process = efx_farch_ev_process, .ev_process = efx_farch_ev_process,
.ev_read_ack = efx_farch_ev_read_ack, .ev_read_ack = efx_farch_ev_read_ack,
.ev_test_generate = efx_farch_ev_test_generate, .ev_test_generate = efx_farch_ev_test_generate,
.filter_table_probe = efx_farch_filter_table_probe,
.filter_table_restore = efx_farch_filter_table_restore,
.filter_table_remove = efx_farch_filter_table_remove,
.filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
.filter_insert = efx_farch_filter_insert,
.filter_remove_safe = efx_farch_filter_remove_safe,
.filter_get_safe = efx_farch_filter_get_safe,
.filter_clear_rx = efx_farch_filter_clear_rx,
.filter_count_rx_used = efx_farch_filter_count_rx_used,
.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
.filter_rfs_insert = efx_farch_filter_rfs_insert,
.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
#endif
.revision = EFX_REV_SIENA_A0, .revision = EFX_REV_SIENA_A0,
.mem_map_size = (FR_CZ_MC_TREG_SMEM +
FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
...@@ -741,11 +765,9 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -741,11 +765,9 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_buffer_padding = 0, .rx_buffer_padding = 0,
.can_rx_scatter = true, .can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX, .max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
* channels */
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE), NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 1, .mcdi_max_ver = 1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
}; };
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#define SPI_STATUS_NRDY 0x01 /* Device busy flag */ #define SPI_STATUS_NRDY 0x01 /* Device busy flag */
/** /**
* struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
* @device_id: Controller's id for the device * @device_id: Controller's id for the device
* @size: Size (in bytes) * @size: Size (in bytes)
* @addr_len: Number of address bytes in read/write commands * @addr_len: Number of address bytes in read/write commands
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
* @block_size: Write block size (in bytes). * @block_size: Write block size (in bytes).
* Write commands are limited to blocks with this size and alignment. * Write commands are limited to blocks with this size and alignment.
*/ */
struct efx_spi_device { struct falcon_spi_device {
int device_id; int device_id;
unsigned int size; unsigned int size;
unsigned int addr_len; unsigned int addr_len;
...@@ -61,21 +61,21 @@ struct efx_spi_device { ...@@ -61,21 +61,21 @@ struct efx_spi_device {
unsigned int block_size; unsigned int block_size;
}; };
static inline bool efx_spi_present(const struct efx_spi_device *spi) static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{ {
return spi->size != 0; return spi->size != 0;
} }
int falcon_spi_cmd(struct efx_nic *efx, int falcon_spi_cmd(struct efx_nic *efx,
const struct efx_spi_device *spi, unsigned int command, const struct falcon_spi_device *spi, unsigned int command,
int address, const void *in, void *out, size_t len); int address, const void *in, void *out, size_t len);
int falcon_spi_wait_write(struct efx_nic *efx, int falcon_spi_wait_write(struct efx_nic *efx,
const struct efx_spi_device *spi); const struct falcon_spi_device *spi);
int falcon_spi_read(struct efx_nic *efx, int falcon_spi_read(struct efx_nic *efx,
const struct efx_spi_device *spi, loff_t start, const struct falcon_spi_device *spi, loff_t start,
size_t len, size_t *retlen, u8 *buffer); size_t len, size_t *retlen, u8 *buffer);
int falcon_spi_write(struct efx_nic *efx, int falcon_spi_write(struct efx_nic *efx,
const struct efx_spi_device *spi, loff_t start, const struct falcon_spi_device *spi, loff_t start,
size_t len, size_t *retlen, const u8 *buffer); size_t len, size_t *retlen, const u8 *buffer);
/* /*
...@@ -93,7 +93,7 @@ int falcon_spi_write(struct efx_nic *efx, ...@@ -93,7 +93,7 @@ int falcon_spi_write(struct efx_nic *efx,
*/ */
#define FALCON_NVCONFIG_END 0x400U #define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U #define FALCON_FLASH_BOOTCODE_START 0x8000U
#define EFX_EEPROM_BOOTCONFIG_START 0x800U #define FALCON_EEPROM_BOOTCONFIG_START 0x800U
#define EFX_EEPROM_BOOTCONFIG_END 0x1800U #define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
#endif /* EFX_SPI_H */ #endif /* EFX_SPI_H */
...@@ -15,25 +15,15 @@ ...@@ -15,25 +15,15 @@
* Bug numbers are from Solarflare's Bugzilla. * Bug numbers are from Solarflare's Bugzilla.
*/ */
#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) #define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) #define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1 #define EFX_WORKAROUND_10G(efx) 1
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
/* RX PCIe double split performance issue */
#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */ /* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
* or a PCIe error (bug 11028) */
#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
/* Truncated IPv4 packets can confuse the TX packet parser */ /* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB #define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */ /* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment