Commit 45cc3a0c authored by David S. Miller

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
More refactoring and cleanup, particularly around filter management.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 35fdb94b b766630b
 sfc-y	+= efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
-	   filter.o \
 	   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
 	   tenxpress.o txc43128_phy.o falcon_boards.o \
 	   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
...
@@ -17,7 +17,6 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/in.h>
-#include <linux/crc32.h>
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
@@ -339,6 +338,7 @@ static void efx_init_eventq(struct efx_channel *channel)
 	channel->eventq_read_ptr = 0;
 	efx_nic_init_eventq(channel);
+	channel->eventq_init = true;
 }

 /* Enable event queue processing and NAPI */
@@ -367,10 +367,14 @@ static void efx_stop_eventq(struct efx_channel *channel)

 static void efx_fini_eventq(struct efx_channel *channel)
 {
+	if (!channel->eventq_init)
+		return;
+
 	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 		  "chan %d fini event queue\n", channel->channel);

 	efx_nic_fini_eventq(channel);
+	channel->eventq_init = false;
 }

 static void efx_remove_eventq(struct efx_channel *channel)
@@ -606,7 +610,7 @@ static void efx_start_datapath(struct efx_nic *efx)

 	/* RX filters also have scatter-enabled flags */
 	if (efx->rx_scatter != old_rx_scatter)
-		efx_filter_update_rx_scatter(efx);
+		efx->type->filter_update_rx_scatter(efx);

 	/* We must keep at least one descriptor in a TX ring empty.
 	 * We could avoid this when the queue size does not exactly
@@ -871,10 +875,9 @@ void efx_link_status_changed(struct efx_nic *efx)
 	/* Status message for kernel log */
 	if (link_state->up)
 		netif_info(efx, link, efx->net_dev,
-			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
+			   "link up at %uMbps %s-duplex (MTU %d)\n",
 			   link_state->speed, link_state->fd ? "full" : "half",
-			   efx->net_dev->mtu,
-			   (efx->promiscuous ? " [PROMISC]" : ""));
+			   efx->net_dev->mtu);
 	else
 		netif_info(efx, link, efx->net_dev, "link down\n");
 }
@@ -923,10 +926,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)

 	WARN_ON(!mutex_is_locked(&efx->mac_lock));

-	/* Serialise the promiscuous flag with efx_set_rx_mode. */
-	netif_addr_lock_bh(efx->net_dev);
-	netif_addr_unlock_bh(efx->net_dev);
-
 	/* Disable PHY transmit in mac level loopbacks */
 	phy_mode = efx->phy_mode;
 	if (LOOPBACK_INTERNAL(efx))
@@ -1084,6 +1083,7 @@ static int efx_init_io(struct efx_nic *efx)
 {
 	struct pci_dev *pci_dev = efx->pci_dev;
 	dma_addr_t dma_mask = efx->type->max_dma_mask;
+	unsigned int mem_map_size = efx->type->mem_map_size(efx);
 	int rc;

 	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1136,20 +1136,18 @@ static int efx_init_io(struct efx_nic *efx)
 		rc = -EIO;
 		goto fail3;
 	}
-	efx->membase = ioremap_nocache(efx->membase_phys,
-				       efx->type->mem_map_size);
+	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
 	if (!efx->membase) {
 		netif_err(efx, probe, efx->net_dev,
 			  "could not map memory BAR at %llx+%x\n",
-			  (unsigned long long)efx->membase_phys,
-			  efx->type->mem_map_size);
+			  (unsigned long long)efx->membase_phys, mem_map_size);
 		rc = -ENOMEM;
 		goto fail4;
 	}
 	netif_dbg(efx, probe, efx->net_dev,
 		  "memory BAR at %llx+%x (virtual %p)\n",
-		  (unsigned long long)efx->membase_phys,
-		  efx->type->mem_map_size, efx->membase);
+		  (unsigned long long)efx->membase_phys, mem_map_size,
+		  efx->membase);

 	return 0;
@@ -1228,8 +1226,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
  */
 static int efx_probe_interrupts(struct efx_nic *efx)
 {
-	unsigned int max_channels =
-		min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
 	unsigned int extra_channels = 0;
 	unsigned int i, j;
 	int rc;
@@ -1246,7 +1242,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		if (separate_tx_channels)
 			n_channels *= 2;
 		n_channels += extra_channels;
-		n_channels = min(n_channels, max_channels);
+		n_channels = min(n_channels, efx->max_channels);

 		for (i = 0; i < n_channels; i++)
 			xentries[i].entry = i;
@@ -1497,6 +1493,44 @@ static void efx_remove_nic(struct efx_nic *efx)
 	efx->type->remove(efx);
 }

+static int efx_probe_filters(struct efx_nic *efx)
+{
+	int rc;
+
+	spin_lock_init(&efx->filter_lock);
+
+	rc = efx->type->filter_table_probe(efx);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (efx->type->offload_features & NETIF_F_NTUPLE) {
+		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
+					   sizeof(*efx->rps_flow_id),
+					   GFP_KERNEL);
+		if (!efx->rps_flow_id) {
+			efx->type->filter_table_remove(efx);
+			return -ENOMEM;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+static void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+	kfree(efx->rps_flow_id);
+#endif
+	efx->type->filter_table_remove(efx);
+}
+
+static void efx_restore_filters(struct efx_nic *efx)
+{
+	efx->type->filter_table_restore(efx);
+}
+
 /**************************************************************************
  *
  * NIC startup/shutdown
@@ -1987,30 +2021,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
 static void efx_set_rx_mode(struct net_device *net_dev)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	struct netdev_hw_addr *ha;
-	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
-	u32 crc;
-	int bit;
-
-	efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
-
-	/* Build multicast hash table */
-	if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
-		memset(mc_hash, 0xff, sizeof(*mc_hash));
-	} else {
-		memset(mc_hash, 0x00, sizeof(*mc_hash));
-		netdev_for_each_mc_addr(ha, net_dev) {
-			crc = ether_crc_le(ETH_ALEN, ha->addr);
-			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
-			__set_bit_le(bit, mc_hash);
-		}
-
-		/* Broadcast packets go through the multicast hash filter.
-		 * ether_crc_le() of the broadcast address is 0xbe2612ff
-		 * so we always add bit 0xff to the mask.
-		 */
-		__set_bit_le(0xff, mc_hash);
-	}

 	if (efx->port_enabled)
 		queue_work(efx->workqueue, &efx->mac_work);
@@ -2489,8 +2499,6 @@ static int efx_init_struct(struct efx_nic *efx,
 		efx->msi_context[i].index = i;
 	}

-	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
-
 	/* Higher numbered interrupt modes are less capable! */
 	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
 				  interrupt_mode);
...
@@ -68,27 +68,92 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))

 /* Filters */
-extern int efx_probe_filters(struct efx_nic *efx);
-extern void efx_restore_filters(struct efx_nic *efx);
-extern void efx_remove_filters(struct efx_nic *efx);
-extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_filter_insert_filter(struct efx_nic *efx,
-				    struct efx_filter_spec *spec,
-				    bool replace);
-extern int efx_filter_remove_id_safe(struct efx_nic *efx,
-				     enum efx_filter_priority priority,
-				     u32 filter_id);
-extern int efx_filter_get_filter_safe(struct efx_nic *efx,
-				      enum efx_filter_priority priority,
-				      u32 filter_id, struct efx_filter_spec *);
-extern void efx_filter_clear_rx(struct efx_nic *efx,
-				enum efx_filter_priority priority);
-extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
-				    enum efx_filter_priority priority);
-extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
-				 enum efx_filter_priority priority,
-				 u32 *buf, u32 size);
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ *	existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If an existing filter has equal match values to the new filter
+ * spec, then the new filter might replace it, depending on the
+ * relative priorities.  If the existing filter has lower priority, or
+ * if @replace_equal is set and it has equal priority, then it is
+ * replaced.  Otherwise the function fails, returning -%EPERM if
+ * the existing filter has higher priority or -%EEXIST if it has
+ * equal priority.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+					   struct efx_filter_spec *spec,
+					   bool replace_equal)
+{
+	return efx->type->filter_insert(efx, spec, replace_equal);
+}
+
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+					    enum efx_filter_priority priority,
+					    u32 filter_id)
+{
+	return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to retrieve the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+			   enum efx_filter_priority priority,
+			   u32 filter_id, struct efx_filter_spec *spec)
+{
+	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+static inline void efx_filter_clear_rx(struct efx_nic *efx,
+				       enum efx_filter_priority priority)
+{
+	return efx->type->filter_clear_rx(efx, priority);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+					   enum efx_filter_priority priority)
+{
+	return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+	return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+					enum efx_filter_priority priority,
+					u32 *buf, u32 size)
+{
+	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
 #ifdef CONFIG_RFS_ACCEL
 extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 			  u16 rxq_index, u32 flow_id);
...
@@ -799,11 +799,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
 	return efx_reset(efx, rc);
 }

-/* MAC address mask including only MC flag */
-static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };

 #define IP4_ADDR_FULL_MASK	((__force __be32)~0)
 #define PORT_FULL_MASK		((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)

 static int efx_ethtool_get_class_rule(struct efx_nic *efx,
 				      struct ethtool_rx_flow_spec *rule)
@@ -813,8 +814,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
 	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
 	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
 	struct efx_filter_spec spec;
-	u16 vid;
-	u8 proto;
 	int rc;

 	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
@@ -822,44 +821,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
 	if (rc)
 		return rc;
-	if (spec.dmaq_id == 0xfff)
+	if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
 		rule->ring_cookie = RX_CLS_FLOW_DISC;
 	else
 		rule->ring_cookie = spec.dmaq_id;

-	if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
-		rule->flow_type = ETHER_FLOW;
-		memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
-		if (spec.type == EFX_FILTER_MC_DEF)
-			memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
-		return 0;
-	}
-
-	rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
-	if (rc == 0) {
-		rule->flow_type = ETHER_FLOW;
-		memset(mac_mask->h_dest, ~0, ETH_ALEN);
-		if (vid != EFX_FILTER_VID_UNSPEC) {
-			rule->flow_type |= FLOW_EXT;
-			rule->h_ext.vlan_tci = htons(vid);
-			rule->m_ext.vlan_tci = htons(0xfff);
-		}
-		return 0;
-	}
-
-	rc = efx_filter_get_ipv4_local(&spec, &proto,
-				       &ip_entry->ip4dst, &ip_entry->pdst);
-	if (rc != 0) {
-		rc = efx_filter_get_ipv4_full(
-			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
-			&ip_entry->ip4src, &ip_entry->psrc);
-		EFX_WARN_ON_PARANOID(rc);
-		ip_mask->ip4src = IP4_ADDR_FULL_MASK;
-		ip_mask->psrc = PORT_FULL_MASK;
-	}
-	rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
-	ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
-	ip_mask->pdst = PORT_FULL_MASK;
+	if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+	    spec.ether_type == htons(ETH_P_IP) &&
+	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+	    !(spec.match_flags &
+	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+		EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+				   TCP_V4_FLOW : UDP_V4_FLOW);
+		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+			ip_entry->ip4dst = spec.loc_host[0];
+			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+			ip_entry->ip4src = spec.rem_host[0];
+			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+			ip_entry->pdst = spec.loc_port;
+			ip_mask->pdst = PORT_FULL_MASK;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+			ip_entry->psrc = spec.rem_port;
+			ip_mask->psrc = PORT_FULL_MASK;
+		}
+	} else if (!(spec.match_flags &
+		     ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+		       EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+		       EFX_FILTER_MATCH_OUTER_VID))) {
+		rule->flow_type = ETHER_FLOW;
+		if (spec.match_flags &
+		    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+			memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
+			if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+				memset(mac_mask->h_dest, ~0, ETH_ALEN);
+			else
+				memcpy(mac_mask->h_dest, mac_addr_ig_mask,
+				       ETH_ALEN);
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+			memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
+			memset(mac_mask->h_source, ~0, ETH_ALEN);
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+			mac_entry->h_proto = spec.ether_type;
+			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+		}
+	} else {
+		/* The above should handle all filters that we insert */
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+		rule->flow_type |= FLOW_EXT;
+		rule->h_ext.vlan_tci = spec.outer_vid;
+		rule->m_ext.vlan_tci = htons(0xfff);
+	}

 	return rc;
 }
@@ -967,82 +994,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
 			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
 			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
-			   0xfff : rule->ring_cookie);
+			   EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);

-	switch (rule->flow_type) {
+	switch (rule->flow_type & ~FLOW_EXT) {
 	case TCP_V4_FLOW:
-	case UDP_V4_FLOW: {
-		u8 proto = (rule->flow_type == TCP_V4_FLOW ?
-			    IPPROTO_TCP : IPPROTO_UDP);
-
-		/* Must match all of destination, */
-		if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
-		      ip_mask->pdst == PORT_FULL_MASK))
-			return -EINVAL;
-		/* all or none of source, */
-		if ((ip_mask->ip4src || ip_mask->psrc) &&
-		    !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
-		      ip_mask->psrc == PORT_FULL_MASK))
-			return -EINVAL;
-		/* and nothing else */
-		if (ip_mask->tos || rule->m_ext.vlan_tci)
-			return -EINVAL;
-
-		if (ip_mask->ip4src)
-			rc = efx_filter_set_ipv4_full(&spec, proto,
-						      ip_entry->ip4dst,
-						      ip_entry->pdst,
-						      ip_entry->ip4src,
-						      ip_entry->psrc);
-		else
-			rc = efx_filter_set_ipv4_local(&spec, proto,
-						       ip_entry->ip4dst,
-						       ip_entry->pdst);
-		if (rc)
-			return rc;
-		break;
-	}
-
-	case ETHER_FLOW | FLOW_EXT:
-	case ETHER_FLOW: {
-		u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
-				     ntohs(rule->m_ext.vlan_tci) : 0);
-
-		/* Must not match on source address or Ethertype */
-		if (!is_zero_ether_addr(mac_mask->h_source) ||
-		    mac_mask->h_proto)
-			return -EINVAL;
-
-		/* Is it a default UC or MC filter? */
-		if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
-		    vlan_tag_mask == 0) {
-			if (is_multicast_ether_addr(mac_entry->h_dest))
-				rc = efx_filter_set_mc_def(&spec);
-			else
-				rc = efx_filter_set_uc_def(&spec);
-		}
-		/* Otherwise, it must match all of destination and all
-		 * or none of VID.
-		 */
-		else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
-			 (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
-			rc = efx_filter_set_eth_local(
-				&spec,
-				vlan_tag_mask ?
-				ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
-				mac_entry->h_dest);
-		} else {
-			rc = -EINVAL;
-		}
-		if (rc)
-			return rc;
-		break;
-	}
+	case UDP_V4_FLOW:
+		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+				    EFX_FILTER_MATCH_IP_PROTO);
+		spec.ether_type = htons(ETH_P_IP);
+		spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
+				 IPPROTO_TCP : IPPROTO_UDP);
+		if (ip_mask->ip4dst) {
+			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+			spec.loc_host[0] = ip_entry->ip4dst;
+		}
+		if (ip_mask->ip4src) {
+			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+			spec.rem_host[0] = ip_entry->ip4src;
+		}
+		if (ip_mask->pdst) {
+			if (ip_mask->pdst != PORT_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+			spec.loc_port = ip_entry->pdst;
+		}
+		if (ip_mask->psrc) {
+			if (ip_mask->psrc != PORT_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+			spec.rem_port = ip_entry->psrc;
+		}
+		if (ip_mask->tos)
+			return -EINVAL;
+		break;

+	case ETHER_FLOW:
+		if (!is_zero_ether_addr(mac_mask->h_dest)) {
+			if (ether_addr_equal(mac_mask->h_dest,
+					     mac_addr_ig_mask))
+				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+			else if (is_broadcast_ether_addr(mac_mask->h_dest))
+				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+			else
+				return -EINVAL;
+			memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
+		}
+		if (!is_zero_ether_addr(mac_mask->h_source)) {
+			if (!is_broadcast_ether_addr(mac_mask->h_source))
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+			memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
+		}
+		if (mac_mask->h_proto) {
+			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+			spec.ether_type = mac_entry->h_proto;
+		}
+		break;
+
 	default:
 		return -EINVAL;
 	}

+	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+		if (rule->m_ext.vlan_tci != htons(0xfff))
+			return -EINVAL;
+		spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+		spec.outer_vid = rule->h_ext.vlan_tci;
+	}
+
 	rc = efx_filter_insert_filter(efx, &spec, true);
 	if (rc < 0)
 		return rc;
...
@@ -434,7 +434,7 @@ static int falcon_spi_wait(struct efx_nic *efx)
 	}
 }

-int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
+int falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
 		   unsigned int command, int address,
 		   const void *in, void *out, size_t len)
 {
@@ -491,14 +491,14 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
 }

 static size_t
-falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
+falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
 {
 	return min(FALCON_SPI_MAX_LEN,
 		   (spi->block_size - (start & (spi->block_size - 1))));
 }

 static inline u8
-efx_spi_munge_command(const struct efx_spi_device *spi,
+falcon_spi_munge_command(const struct falcon_spi_device *spi,
 		      const u8 command, const unsigned int address)
 {
 	return command | (((address >> 8) & spi->munge_address) << 3);
@@ -506,7 +506,7 @@ efx_spi_munge_command(const struct efx_spi_device *spi,

 /* Wait up to 10 ms for buffered write completion */
 int
-falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
+falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
 {
 	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
 	u8 status;
@@ -530,7 +530,7 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
 	}
 }

-int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
+int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
 		    loff_t start, size_t len, size_t *retlen, u8 *buffer)
 {
 	size_t block_len, pos = 0;
@@ -540,7 +540,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
 	while (pos < len) {
 		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

-		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
 		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
 				    buffer + pos, block_len);
 		if (rc)
@@ -561,7 +561,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
 }

 int
-falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
+falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
 		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
 {
 	u8 verify_buffer[FALCON_SPI_MAX_LEN];
@@ -576,7 +576,7 @@ falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
 		block_len = min(len - pos,
 				falcon_spi_write_limit(spi, start + pos));
-		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
 		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 				    buffer + pos, NULL, block_len);
 		if (rc)
@@ -586,7 +586,7 @@ falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
 		if (rc)
 			break;

-		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
 		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 				    NULL, verify_buffer, block_len);
 		if (memcmp(verify_buffer, buffer + pos, block_len)) {
@@ -686,7 +686,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
 		return;

 	/* We expect xgmii faults if the wireside link is down */
-	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
+	if (!efx->link_state.up)
 		return;

 	/* We can only use this interrupt to signal the negative edge of
@@ -764,7 +764,7 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
 			     FRF_AB_XM_RXEN, 1,
 			     FRF_AB_XM_AUTO_DEPAD, 0,
 			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
-			     FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
+			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
 			     FRF_AB_XM_PASS_CRC_ERR, 1);
 	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
@@ -795,29 +795,22 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
 	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
 	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
 	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
+	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

 	/* XGXS block is flaky and will need to be reset if moving
 	 * into or out of XGMII, XGXS or XAUI loopbacks. */
-	if (EFX_WORKAROUND_5147(efx)) {
-		bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
-		bool reset_xgxs;
-
-		efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
-		old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
-		old_xgmii_loopback =
-			EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
-
-		efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
-		old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
-
-		/* The PHY driver may have turned XAUI off */
-		reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
-			      (xaui_loopback != old_xaui_loopback) ||
-			      (xgmii_loopback != old_xgmii_loopback));
-
-		if (reset_xgxs)
-			falcon_reset_xaui(efx);
-	}
+	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+	old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+	old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+
+	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+	old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+
+	/* The PHY driver may have turned XAUI off */
+	if ((xgxs_loopback != old_xgxs_loopback) ||
+	    (xaui_loopback != old_xaui_loopback) ||
+	    (xgmii_loopback != old_xgmii_loopback))
+		falcon_reset_xaui(efx);

 	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
 	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
@@ -871,6 +864,8 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;

+	efx_farch_filter_sync_rx_mode(efx);
+
 	falcon_reconfigure_xgxs_core(efx);
 	falcon_reconfigure_xmac_core(efx);

@@ -946,8 +941,8 @@ static void falcon_poll_xmac(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;

-	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
-	    !nic_data->xmac_poll_required)
+	/* We expect xgmii faults if the wireside link is down */
+	if (!efx->link_state.up || !nic_data->xmac_poll_required)
 		return;

 	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
@@ -1088,7 +1083,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	EFX_POPULATE_OWORD_5(reg,
 			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
 			     FRF_AB_MAC_BCAD_ACPT, 1,
-			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
+			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
 			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
 			     FRF_AB_MAC_SPEED, link_speed);

 	/* On B0, MAC backpressure can be disabled and packets get
@@ -1486,15 +1481,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
 	struct falcon_nvconfig *nvconfig;
-	struct efx_spi_device *spi;
+	struct falcon_spi_device *spi;
 	void *region;
 	int rc, magic_num, struct_ver;
 	__le16 *word, *limit;
 	u32 csum;

-	if (efx_spi_present(&nic_data->spi_flash))
+	if (falcon_spi_present(&nic_data->spi_flash))
 		spi = &nic_data->spi_flash;
-	else if (efx_spi_present(&nic_data->spi_eeprom))
+	else if (falcon_spi_present(&nic_data->spi_eeprom))
 		spi = &nic_data->spi_eeprom;
 	else
 		return -EINVAL;
@@ -1509,7 +1504,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 	mutex_unlock(&nic_data->spi_lock);
 	if (rc) {
 		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
-			  efx_spi_present(&nic_data->spi_flash) ?
+			  falcon_spi_present(&nic_data->spi_flash) ?
 			  "flash" : "EEPROM");
 		rc = -EIO;
 		goto out;
@@ -1854,7 +1849,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
 }

 static void falcon_spi_device_init(struct efx_nic *efx,
-				   struct efx_spi_device *spi_device,
+				   struct falcon_spi_device *spi_device,
 				   unsigned int device_id, u32 device_type)
 {
 	if (device_type != 0) {
@@ -1970,6 +1965,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
 			       large_eeprom_type);
 }
+static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
+{
+	return 0x20000;
+}
+
+static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
+{
+	/* Map everything up to and including the RSS indirection table.
+	 * The PCI core takes care of mapping the MSI-X tables.
+	 */
+	return FR_BZ_RX_INDIRECTION_TBL +
+	       FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
+}
+
 static int falcon_probe_nic(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data;
@@ -2060,6 +2069,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
 		goto fail5;
 	}

+	efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
+			     EFX_MAX_CHANNELS);
+
 	efx->timer_quantum_ns = 4968; /* 621 cycles */

 	/* Initialise I2C adapter */
@@ -2339,6 +2350,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
  */

 const struct efx_nic_type falcon_a1_nic_type = {
+	.mem_map_size = falcon_a1_mem_map_size,
 	.probe = falcon_probe_nic,
 	.remove = falcon_remove_nic,
 	.init = falcon_init_nic,
@@ -2390,8 +2402,22 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.ev_read_ack = efx_farch_ev_read_ack,
 	.ev_test_generate = efx_farch_ev_test_generate,

+	/* We don't expose the filter table on Falcon A1 as it is not
+	 * mapped into function 0, but these implementations still
+	 * work with a degenerate case of all tables set to size 0.
+	 */
+	.filter_table_probe = efx_farch_filter_table_probe,
+	.filter_table_restore = efx_farch_filter_table_restore,
+	.filter_table_remove = efx_farch_filter_table_remove,
+	.filter_insert = efx_farch_filter_insert,
+	.filter_remove_safe = efx_farch_filter_remove_safe,
+	.filter_get_safe = efx_farch_filter_get_safe,
+	.filter_clear_rx = efx_farch_filter_clear_rx,
+	.filter_count_rx_used = efx_farch_filter_count_rx_used,
+	.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+	.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+
 	.revision = EFX_REV_FALCON_A1,
-	.mem_map_size = 0x20000,
 	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
 	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
 	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
@@ -2401,13 +2427,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.rx_buffer_padding = 0x24,
 	.can_rx_scatter = false,
 	.max_interrupt_mode = EFX_INT_MODE_MSI,
-	.phys_addr_channels = 4,
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
 	.offload_features = NETIF_F_IP_CSUM,
 	.mcdi_max_ver = -1,
 };
 const struct efx_nic_type falcon_b0_nic_type = {
+	.mem_map_size = falcon_b0_mem_map_size,
 	.probe = falcon_probe_nic,
 	.remove = falcon_remove_nic,
 	.init = falcon_init_nic,
@@ -2459,14 +2485,23 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.ev_process = efx_farch_ev_process,
 	.ev_read_ack = efx_farch_ev_read_ack,
 	.ev_test_generate = efx_farch_ev_test_generate,
+	.filter_table_probe = efx_farch_filter_table_probe,
+	.filter_table_restore = efx_farch_filter_table_restore,
+	.filter_table_remove = efx_farch_filter_table_remove,
+	.filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+	.filter_insert = efx_farch_filter_insert,
+	.filter_remove_safe = efx_farch_filter_remove_safe,
+	.filter_get_safe = efx_farch_filter_get_safe,
+	.filter_clear_rx = efx_farch_filter_clear_rx,
+	.filter_count_rx_used = efx_farch_filter_count_rx_used,
+	.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+	.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+	.filter_rfs_insert = efx_farch_filter_rfs_insert,
+	.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif

 	.revision = EFX_REV_FALCON_B0,
-	/* Map everything up to and including the RSS indirection
-	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
-	 * requires that they not be mapped. */
-	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
-			 FR_BZ_RX_INDIRECTION_TBL_STEP *
-			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
 	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
 	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
 	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
@@ -2477,11 +2512,9 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.rx_buffer_padding = 0,
 	.can_rx_scatter = true,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
-	.phys_addr_channels = 32,	/* Hardware limit is 64, but the legacy
-					 * interrupt handler only supports 32
-					 * channels */
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
 	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
 	.mcdi_max_ver = -1,
+	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
 };
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/seq_file.h>
+#include <linux/crc32.h>
 #include "net_driver.h"
 #include "bitfield.h"
 #include "efx.h"
@@ -830,8 +831,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 		netif_tx_lock(efx->net_dev);
 		efx_farch_notify_tx_desc(tx_queue);
 		netif_tx_unlock(efx->net_dev);
-	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
-		   EFX_WORKAROUND_10727(efx)) {
+	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
 		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
 	} else {
 		netif_err(efx, tx_err, efx->net_dev,
@@ -1531,7 +1531,6 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
 	}

 	if (queues != 0) {
-		if (EFX_WORKAROUND_15783(efx))
-			efx->irq_zero_count = 0;
+		efx->irq_zero_count = 0;

 		/* Schedule processing of any interrupting queues */
@@ -1544,9 +1543,11 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
 		}
 		result = IRQ_HANDLED;
-	} else if (EFX_WORKAROUND_15783(efx)) {
+	} else {
 		efx_qword_t *event;

+		/* Legacy ISR read can return zero once (SF bug 15783) */
+
 		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
 		 * because this might be a shared interrupt. */
 		if (efx->irq_zero_count++ == 0)
@@ -1779,3 +1780,1163 @@ void efx_farch_init_common(struct efx_nic *efx)
 		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
 	}
 }
/**************************************************************************
*
* Filter tables
*
**************************************************************************
*/
/* "Fudge factors" - difference between programmed value and actual depth.
* Due to pipelined implementation we need to program H/W with a value that
* is larger than the hop limit we want.
*/
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
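/* Editor's worked example (not part of the original patch): if the
 * deepest TCP wildcard filter inserted so far sits at search depth 10,
 * efx_farch_filter_push_rx_config() below programs the corresponding
 * register field with 10 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD == 13.
 */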
/* Hard maximum search limit. Hardware will time-out beyond 200-something.
* We also need to avoid infinite loops in efx_farch_filter_search() when the
* table is full.
*/
#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
/* Don't try very hard to find space for performance hints, as this is
* counter-productive. */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
enum efx_farch_filter_type {
EFX_FARCH_FILTER_TCP_FULL = 0,
EFX_FARCH_FILTER_TCP_WILD,
EFX_FARCH_FILTER_UDP_FULL,
EFX_FARCH_FILTER_UDP_WILD,
EFX_FARCH_FILTER_MAC_FULL = 4,
EFX_FARCH_FILTER_MAC_WILD,
EFX_FARCH_FILTER_UC_DEF = 8,
EFX_FARCH_FILTER_MC_DEF,
EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
};
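/* Editor's note (added for illustration): the numeric values above are
 * chosen so that a spec's table can be derived as (type >> 2), e.g.
 * EFX_FARCH_FILTER_MAC_WILD (5) >> 2 == 1 == EFX_FARCH_FILTER_TABLE_RX_MAC.
 * efx_farch_filter_spec_table_id() below relies on this, and its
 * BUILD_BUG_ON()s verify it at compile time.
 */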
enum efx_farch_filter_table_id {
EFX_FARCH_FILTER_TABLE_RX_IP = 0,
EFX_FARCH_FILTER_TABLE_RX_MAC,
EFX_FARCH_FILTER_TABLE_RX_DEF,
EFX_FARCH_FILTER_TABLE_TX_MAC,
EFX_FARCH_FILTER_TABLE_COUNT,
};
enum efx_farch_filter_index {
EFX_FARCH_FILTER_INDEX_UC_DEF,
EFX_FARCH_FILTER_INDEX_MC_DEF,
EFX_FARCH_FILTER_SIZE_RX_DEF,
};
struct efx_farch_filter_spec {
u8 type:4;
u8 priority:4;
u8 flags;
u16 dmaq_id;
u32 data[3];
};
struct efx_farch_filter_table {
enum efx_farch_filter_table_id id;
u32 offset; /* address of table relative to BAR */
unsigned size; /* number of entries */
unsigned step; /* step between entries */
unsigned used; /* number currently used */
unsigned long *used_bitmap;
struct efx_farch_filter_spec *spec;
unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
};
struct efx_farch_filter_state {
struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
};
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
struct efx_farch_filter_table *table,
unsigned int filter_idx);
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 efx_farch_filter_hash(u32 key)
{
u16 tmp;
/* First 16 rounds */
tmp = 0x1fff ^ key >> 16;
tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
tmp = tmp ^ tmp >> 9;
/* Last 16 rounds */
tmp = tmp ^ tmp << 13 ^ key;
tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
return tmp ^ tmp >> 9;
}
/* To allow for hash collisions, filter search continues at these
* increments from the first possible entry selected by the hash. */
static u16 efx_farch_filter_increment(u32 key)
{
return key * 2 - 1;
}
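/* A minimal sketch of how the two helpers above fit together
 * (illustrative only; the real probe loop lives in
 * efx_farch_filter_search(), which is not part of this hunk, and
 * slot_matches_or_is_free() is a hypothetical name):
 *
 *	unsigned int idx = efx_farch_filter_hash(key) & (table->size - 1);
 *	unsigned int incr = efx_farch_filter_increment(key);
 *	unsigned int depth = 1;
 *
 *	while (!slot_matches_or_is_free(table, idx)) {
 *		if (++depth > search_limit)
 *			return -EBUSY;
 *		idx = (idx + incr) & (table->size - 1);
 *	}
 *
 * Because the increment (key * 2 - 1) is always odd, the probe sequence
 * visits every entry of the power-of-two-sized table before repeating.
 */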
static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{
BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
(EFX_FARCH_FILTER_TCP_FULL >> 2));
BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
(EFX_FARCH_FILTER_TCP_WILD >> 2));
BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
(EFX_FARCH_FILTER_UDP_FULL >> 2));
BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
(EFX_FARCH_FILTER_UDP_WILD >> 2));
BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
(EFX_FARCH_FILTER_MAC_FULL >> 2));
BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
(EFX_FARCH_FILTER_MAC_WILD >> 2));
BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
{
struct efx_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table;
efx_oword_t filter_ctl;
efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
if (table->size) {
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
}
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
if (table->size) {
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
/* There is a single bit to enable RX scatter for all
* unmatched packets. Only set it if scatter is
* enabled in both filter specs.
*/
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_SCATTER));
} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
/* We don't expose 'default' filters because unmatched
* packets always go to the queue number found in the
* RSS table. But we still need to set the RX scatter
* bit here.
*/
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
efx->rx_scatter);
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
{
struct efx_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table;
efx_oword_t tx_cfg;
efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
if (table->size) {
EFX_SET_OWORD_FIELD(
tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(
tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
}
efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
const struct efx_filter_spec *gen_spec)
{
bool is_full = false;
if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
return -EINVAL;
spec->priority = gen_spec->priority;
spec->flags = gen_spec->flags;
spec->dmaq_id = gen_spec->dmaq_id;
switch (gen_spec->match_flags) {
case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
is_full = true;
/* fall through */
case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
__be32 rhost, host1, host2;
__be16 rport, port1, port2;
EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
if (gen_spec->ether_type != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
if (gen_spec->loc_port == 0 ||
(is_full && gen_spec->rem_port == 0))
return -EADDRNOTAVAIL;
switch (gen_spec->ip_proto) {
case IPPROTO_TCP:
spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
EFX_FARCH_FILTER_TCP_WILD);
break;
case IPPROTO_UDP:
spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
EFX_FARCH_FILTER_UDP_WILD);
break;
default:
return -EPROTONOSUPPORT;
}
/* Filter is constructed in terms of source and destination,
* with the odd wrinkle that the ports are swapped in a UDP
* wildcard filter. We need to convert from local and remote
* (= zero for wildcard) addresses.
*/
rhost = is_full ? gen_spec->rem_host[0] : 0;
rport = is_full ? gen_spec->rem_port : 0;
host1 = rhost;
host2 = gen_spec->loc_host[0];
if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
port1 = gen_spec->loc_port;
port2 = rport;
} else {
port1 = rport;
port2 = gen_spec->loc_port;
}
spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
spec->data[2] = ntohl(host2);
break;
}
case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
is_full = true;
/* fall through */
case EFX_FILTER_MATCH_LOC_MAC:
spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
EFX_FARCH_FILTER_MAC_WILD);
spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
spec->data[1] = (gen_spec->loc_mac[2] << 24 |
gen_spec->loc_mac[3] << 16 |
gen_spec->loc_mac[4] << 8 |
gen_spec->loc_mac[5]);
spec->data[2] = (gen_spec->loc_mac[0] << 8 |
gen_spec->loc_mac[1]);
break;
case EFX_FILTER_MATCH_LOC_MAC_IG:
spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
EFX_FARCH_FILTER_MC_DEF :
EFX_FARCH_FILTER_UC_DEF);
memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
break;
default:
return -EPROTONOSUPPORT;
}
return 0;
}
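/* Editor's worked example (not in the original): a MAC_FULL filter for
 * loc_mac 00:11:22:33:44:55 with outer VID 7 is packed by the code
 * above as data[0] = 7, data[1] = 0x22334455, data[2] = 0x0011;
 * efx_farch_filter_to_gen_spec() below inverts this packing exactly.
 */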
static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
const struct efx_farch_filter_spec *spec)
{
bool is_full = false;
/* *gen_spec should be completely initialised, to be consistent
* with efx_filter_init_{rx,tx}() and in case we want to copy
* it back to userland.
*/
memset(gen_spec, 0, sizeof(*gen_spec));
gen_spec->priority = spec->priority;
gen_spec->flags = spec->flags;
gen_spec->dmaq_id = spec->dmaq_id;
switch (spec->type) {
case EFX_FARCH_FILTER_TCP_FULL:
case EFX_FARCH_FILTER_UDP_FULL:
is_full = true;
/* fall through */
case EFX_FARCH_FILTER_TCP_WILD:
case EFX_FARCH_FILTER_UDP_WILD: {
__be32 host1, host2;
__be16 port1, port2;
gen_spec->match_flags =
EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
if (is_full)
gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
EFX_FILTER_MATCH_REM_PORT);
gen_spec->ether_type = htons(ETH_P_IP);
gen_spec->ip_proto =
(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
IPPROTO_TCP : IPPROTO_UDP;
host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
port1 = htons(spec->data[0]);
host2 = htonl(spec->data[2]);
port2 = htons(spec->data[1] >> 16);
if (spec->flags & EFX_FILTER_FLAG_TX) {
gen_spec->loc_host[0] = host1;
gen_spec->rem_host[0] = host2;
} else {
gen_spec->loc_host[0] = host2;
gen_spec->rem_host[0] = host1;
}
if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
(!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
gen_spec->loc_port = port1;
gen_spec->rem_port = port2;
} else {
gen_spec->loc_port = port2;
gen_spec->rem_port = port1;
}
break;
}
case EFX_FARCH_FILTER_MAC_FULL:
is_full = true;
/* fall through */
case EFX_FARCH_FILTER_MAC_WILD:
gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
if (is_full)
gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
gen_spec->loc_mac[0] = spec->data[2] >> 8;
gen_spec->loc_mac[1] = spec->data[2];
gen_spec->loc_mac[2] = spec->data[1] >> 24;
gen_spec->loc_mac[3] = spec->data[1] >> 16;
gen_spec->loc_mac[4] = spec->data[1] >> 8;
gen_spec->loc_mac[5] = spec->data[1];
gen_spec->outer_vid = htons(spec->data[0]);
break;
case EFX_FARCH_FILTER_UC_DEF:
case EFX_FARCH_FILTER_MC_DEF:
gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
break;
default:
WARN_ON(1);
break;
}
}
static void
efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
struct efx_farch_filter_spec *spec)
{
/* If there's only one channel then disable RSS for non VF
* traffic, thereby allowing VFs to use RSS when the PF can't.
*/
spec->priority = EFX_FILTER_PRI_REQUIRED;
spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
(efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
(efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
spec->dmaq_id = 0;
}
/* Build a filter entry and return its n-tuple key. */
static u32 efx_farch_filter_build(efx_oword_t *filter,
struct efx_farch_filter_spec *spec)
{
u32 data3;
switch (efx_farch_filter_spec_table_id(spec)) {
case EFX_FARCH_FILTER_TABLE_RX_IP: {
bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
spec->type == EFX_FARCH_FILTER_UDP_WILD);
EFX_POPULATE_OWORD_7(
*filter,
FRF_BZ_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
FRF_BZ_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
FRF_BZ_TCP_UDP, is_udp,
FRF_BZ_RXQ_ID, spec->dmaq_id,
EFX_DWORD_2, spec->data[2],
EFX_DWORD_1, spec->data[1],
EFX_DWORD_0, spec->data[0]);
data3 = is_udp;
break;
}
case EFX_FARCH_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_7(
*filter,
FRF_CZ_RMFT_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
FRF_CZ_RMFT_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
data3 = is_wild;
break;
}
case EFX_FARCH_FILTER_TABLE_TX_MAC: {
bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_5(*filter,
FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
data3 = is_wild | spec->dmaq_id << 1;
break;
}
default:
BUG();
}
return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
const struct efx_farch_filter_spec *right)
{
if (left->type != right->type ||
memcmp(left->data, right->data, sizeof(left->data)))
return false;
if (left->flags & EFX_FILTER_FLAG_TX &&
left->dmaq_id != right->dmaq_id)
return false;
return true;
}
/*
* Construct/deconstruct external filter IDs. At least the RX filter
* IDs must be ordered by matching priority, for RX NFC semantics.
*
* Deconstruction needs to be robust against invalid IDs so that
* efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
* accept user-provided IDs.
*/
#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
[EFX_FARCH_FILTER_TCP_FULL] = 0,
[EFX_FARCH_FILTER_UDP_FULL] = 0,
[EFX_FARCH_FILTER_TCP_WILD] = 1,
[EFX_FARCH_FILTER_UDP_WILD] = 1,
[EFX_FARCH_FILTER_MAC_FULL] = 2,
[EFX_FARCH_FILTER_MAC_WILD] = 3,
[EFX_FARCH_FILTER_UC_DEF] = 4,
[EFX_FARCH_FILTER_MC_DEF] = 4,
};
static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
EFX_FARCH_FILTER_TABLE_RX_IP,
EFX_FARCH_FILTER_TABLE_RX_MAC,
EFX_FARCH_FILTER_TABLE_RX_MAC,
EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
EFX_FARCH_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
EFX_FARCH_FILTER_TABLE_COUNT, /* invalid */
EFX_FARCH_FILTER_TABLE_TX_MAC,
EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
};
#define EFX_FARCH_FILTER_INDEX_WIDTH 13
#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
static inline u32
efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
unsigned int index)
{
unsigned int range;
range = efx_farch_filter_type_match_pri[spec->type];
if (!(spec->flags & EFX_FILTER_FLAG_RX))
range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
}
static inline enum efx_farch_filter_table_id
efx_farch_filter_id_table_id(u32 id)
{
unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
if (range < ARRAY_SIZE(efx_farch_filter_range_table))
return efx_farch_filter_range_table[range];
else
return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
}
static inline unsigned int efx_farch_filter_id_index(u32 id)
{
return id & EFX_FARCH_FILTER_INDEX_MASK;
}
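The three helpers above form an encode/decode round-trip; a minimal sketch (not driver code) of the invariant they maintain, assuming `spec` is a valid in-table spec and `index` a valid slot:
static void check_id_roundtrip(const struct efx_farch_filter_spec *spec,
                               unsigned int index)
{
    u32 id = efx_farch_filter_make_id(spec, index); /* range << 13 | index */

    /* both halves are recoverable, and the range maps back to the
     * owning table via efx_farch_filter_range_table[] */
    WARN_ON(efx_farch_filter_id_index(id) != index);
    WARN_ON(efx_farch_filter_id_table_id(id) !=
            efx_farch_filter_spec_table_id(spec));
}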
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{
struct efx_farch_filter_state *state = efx->filter_state;
unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
enum efx_farch_filter_table_id table_id;
do {
table_id = efx_farch_filter_range_table[range];
if (state->table[table_id].size != 0)
return range << EFX_FARCH_FILTER_INDEX_WIDTH |
state->table[table_id].size;
} while (range--);
return 0;
}
s32 efx_farch_filter_insert(struct efx_nic *efx,
struct efx_filter_spec *gen_spec,
bool replace_equal)
{
struct efx_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table;
struct efx_farch_filter_spec spec;
efx_oword_t filter;
int rep_index, ins_index;
unsigned int depth = 0;
int rc;
rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
if (rc)
return rc;
table = &state->table[efx_farch_filter_spec_table_id(&spec)];
if (table->size == 0)
return -EINVAL;
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_limit=%d", __func__, spec.type,
table->search_limit[spec.type]);
if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
/* One filter spec per type */
BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
ins_index = rep_index;
spin_lock_bh(&efx->filter_lock);
} else {
/* Search concurrently for
* (1) a filter to be replaced (rep_index): any filter
* with the same match values, up to the current
* search depth for this type, and
* (2) the insertion point (ins_index): (1) or any
* free slot before it or up to the maximum search
* depth for this priority
* We fail if we cannot find (2).
*
* We can stop once either
* (a) we find (1), in which case we have definitely
* found (2) as well; or
* (b) we have searched exhaustively for (1), and have
* either found (2) or searched exhaustively for it
*/
u32 key = efx_farch_filter_build(&filter, &spec);
unsigned int hash = efx_farch_filter_hash(key);
unsigned int incr = efx_farch_filter_increment(key);
unsigned int max_rep_depth = table->search_limit[spec.type];
unsigned int max_ins_depth =
spec.priority <= EFX_FILTER_PRI_HINT ?
EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
EFX_FARCH_FILTER_CTL_SRCH_MAX;
unsigned int i = hash & (table->size - 1);
ins_index = -1;
depth = 1;
spin_lock_bh(&efx->filter_lock);
for (;;) {
if (!test_bit(i, table->used_bitmap)) {
if (ins_index < 0)
ins_index = i;
} else if (efx_farch_filter_equal(&spec,
&table->spec[i])) {
/* Case (a) */
if (ins_index < 0)
ins_index = i;
rep_index = i;
break;
}
if (depth >= max_rep_depth &&
(ins_index >= 0 || depth >= max_ins_depth)) {
/* Case (b) */
if (ins_index < 0) {
rc = -EBUSY;
goto out;
}
rep_index = -1;
break;
}
i = (i + incr) & (table->size - 1);
++depth;
}
}
/* If we found a filter to be replaced, check whether we
* should do so
*/
if (rep_index >= 0) {
struct efx_farch_filter_spec *saved_spec =
&table->spec[rep_index];
if (spec.priority == saved_spec->priority && !replace_equal) {
rc = -EEXIST;
goto out;
}
if (spec.priority < saved_spec->priority &&
!(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
rc = -EPERM;
goto out;
}
if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
/* Just make sure it won't be removed */
saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
rc = 0;
goto out;
}
/* Retain the RX_STACK flag */
spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
}
/* Insert the filter */
if (ins_index != rep_index) {
__set_bit(ins_index, table->used_bitmap);
++table->used;
}
table->spec[ins_index] = spec;
if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
efx_farch_filter_push_rx_config(efx);
} else {
if (table->search_limit[spec.type] < depth) {
table->search_limit[spec.type] = depth;
if (spec.flags & EFX_FILTER_FLAG_TX)
efx_farch_filter_push_tx_limits(efx);
else
efx_farch_filter_push_rx_config(efx);
}
efx_writeo(efx, &filter,
table->offset + table->step * ins_index);
/* If we were able to replace a filter by inserting
* at a lower depth, clear the replaced filter
*/
if (ins_index != rep_index && rep_index >= 0)
efx_farch_filter_table_clear_entry(efx, table,
rep_index);
}
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec.type, ins_index, spec.dmaq_id);
rc = efx_farch_filter_make_id(&spec, ins_index);
out:
spin_unlock_bh(&efx->filter_lock);
return rc;
}
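The search loop above is easier to follow in isolation. A condensed sketch of the same dual search (plain C, not driver code; `used` and `keys` stand in for the used bitmap and the stored specs):
static int dual_search_sketch(const u8 *used, const u32 *keys, u32 key,
                              unsigned int size, u32 hash, u32 incr,
                              unsigned int max_rep, unsigned int max_ins,
                              int *rep_index)
{
    unsigned int i = hash & (size - 1), depth = 1;
    int ins_index = -1;

    *rep_index = -1;
    for (;;) {
        if (!used[i]) {
            if (ins_index < 0)
                ins_index = i;          /* first free slot */
        } else if (keys[i] == key) {
            *rep_index = i;             /* case (a): match found */
            return ins_index < 0 ? i : ins_index;
        }
        if (depth >= max_rep && (ins_index >= 0 || depth >= max_ins))
            return ins_index;           /* case (b); -1 maps to -EBUSY */
        i = (i + incr) & (size - 1);
        ++depth;
    }
}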
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
struct efx_farch_filter_table *table,
unsigned int filter_idx)
{
static efx_oword_t filter; /* static, so zero-initialised; overwrites the HW entry */
EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
BUG_ON(table->offset == 0); /* can't clear MAC default filters */
__clear_bit(filter_idx, table->used_bitmap);
--table->used;
memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
/* If this filter required a greater search depth than
* any other, the search limit for its type can now be
* decreased. However, it is hard to determine that
* unless the table has become completely empty - in
* which case, all its search limits can be set to 0.
*/
if (unlikely(table->used == 0)) {
memset(table->search_limit, 0, sizeof(table->search_limit));
if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
efx_farch_filter_push_tx_limits(efx);
else
efx_farch_filter_push_rx_config(efx);
}
}
static int efx_farch_filter_remove(struct efx_nic *efx,
struct efx_farch_filter_table *table,
unsigned int filter_idx,
enum efx_filter_priority priority)
{
struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
if (!test_bit(filter_idx, table->used_bitmap) ||
spec->priority > priority)
return -ENOENT;
if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
efx_farch_filter_init_rx_for_stack(efx, spec);
efx_farch_filter_push_rx_config(efx);
} else {
efx_farch_filter_table_clear_entry(efx, table, filter_idx);
}
return 0;
}
int efx_farch_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
struct efx_farch_filter_state *state = efx->filter_state;
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
unsigned int filter_idx;
struct efx_farch_filter_spec *spec;
int rc;
table_id = efx_farch_filter_id_table_id(filter_id);
if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
return -ENOENT;
table = &state->table[table_id];
filter_idx = efx_farch_filter_id_index(filter_id);
if (filter_idx >= table->size)
return -ENOENT;
spec = &table->spec[filter_idx];
spin_lock_bh(&efx->filter_lock);
rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
spin_unlock_bh(&efx->filter_lock);
return rc;
}
int efx_farch_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec_buf)
{
struct efx_farch_filter_state *state = efx->filter_state;
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
struct efx_farch_filter_spec *spec;
unsigned int filter_idx;
int rc;
table_id = efx_farch_filter_id_table_id(filter_id);
if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
return -ENOENT;
table = &state->table[table_id];
filter_idx = efx_farch_filter_id_index(filter_id);
if (filter_idx >= table->size)
return -ENOENT;
spec = &table->spec[filter_idx];
spin_lock_bh(&efx->filter_lock);
if (test_bit(filter_idx, table->used_bitmap) &&
spec->priority == priority) {
efx_farch_filter_to_gen_spec(spec_buf, spec);
rc = 0;
} else {
rc = -ENOENT;
}
spin_unlock_bh(&efx->filter_lock);
return rc;
}
static void
efx_farch_filter_table_clear(struct efx_nic *efx,
enum efx_farch_filter_table_id table_id,
enum efx_filter_priority priority)
{
struct efx_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table = &state->table[table_id];
unsigned int filter_idx;
spin_lock_bh(&efx->filter_lock);
for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
efx_farch_filter_remove(efx, table, filter_idx, priority);
spin_unlock_bh(&efx->filter_lock);
}
void efx_farch_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority)
{
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
priority);
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
priority);
efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
priority);
}
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
struct efx_farch_filter_state *state = efx->filter_state;
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
unsigned int filter_idx;
u32 count = 0;
spin_lock_bh(&efx->filter_lock);
for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (test_bit(filter_idx, table->used_bitmap) &&
table->spec[filter_idx].priority == priority)
++count;
}
}
spin_unlock_bh(&efx->filter_lock);
return count;
}
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size)
{
struct efx_farch_filter_state *state = efx->filter_state;
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
unsigned int filter_idx;
s32 count = 0;
spin_lock_bh(&efx->filter_lock);
for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (test_bit(filter_idx, table->used_bitmap) &&
table->spec[filter_idx].priority == priority) {
if (count == size) {
count = -EMSGSIZE;
goto out;
}
buf[count++] = efx_farch_filter_make_id(
&table->spec[filter_idx], filter_idx);
}
}
}
out:
spin_unlock_bh(&efx->filter_lock);
return count;
}
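The two functions above are designed to be paired; a hedged sketch of the typical caller pattern (the function name and error handling here are illustrative, not taken from this patch):
static s32 fetch_manual_rx_ids(struct efx_nic *efx, u32 **ids_out)
{
    u32 n = efx_farch_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
    u32 *ids = kcalloc(n, sizeof(*ids), GFP_KERNEL);
    s32 rc;

    if (!ids)
        return -ENOMEM;
    /* -EMSGSIZE here means filters were inserted between the count
     * and the fetch; a real caller would retry with a larger buffer */
    rc = efx_farch_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, ids, n);
    if (rc < 0)
        kfree(ids);
    else
        *ids_out = ids;
    return rc;
}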
/* Restore filter state after reset */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{
struct efx_farch_filter_state *state = efx->filter_state;
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
efx_oword_t filter;
unsigned int filter_idx;
spin_lock_bh(&efx->filter_lock);
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
/* Check whether this is a regular register table */
if (table->step == 0)
continue;
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (!test_bit(filter_idx, table->used_bitmap))
continue;
efx_farch_filter_build(&filter, &table->spec[filter_idx]);
efx_writeo(efx, &filter,
table->offset + table->step * filter_idx);
}
}
efx_farch_filter_push_rx_config(efx);
efx_farch_filter_push_tx_limits(efx);
spin_unlock_bh(&efx->filter_lock);
}
void efx_farch_filter_table_remove(struct efx_nic *efx)
{
struct efx_farch_filter_state *state = efx->filter_state;
enum efx_farch_filter_table_id table_id;
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
kfree(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
kfree(state);
}
int efx_farch_filter_table_probe(struct efx_nic *efx)
{
struct efx_farch_filter_state *state;
struct efx_farch_filter_table *table;
unsigned table_id;
state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
efx->filter_state = state;
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
table->offset = FR_BZ_RX_FILTER_TBL0;
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
}
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
}
for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
if (table->size == 0)
continue;
table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
sizeof(unsigned long),
GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(table->size * sizeof(*table->spec));
if (!table->spec)
goto fail;
}
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
if (table->size) {
/* RX default filters must always exist */
struct efx_farch_filter_spec *spec;
unsigned i;
for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
spec = &table->spec[i];
spec->type = EFX_FARCH_FILTER_UC_DEF + i;
efx_farch_filter_init_rx_for_stack(efx, spec);
__set_bit(i, table->used_bitmap);
}
}
efx_farch_filter_push_rx_config(efx);
return 0;
fail:
efx_farch_filter_table_remove(efx);
return -ENOMEM;
}
/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{
struct efx_farch_filter_state *state = efx->filter_state;
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
efx_oword_t filter;
unsigned int filter_idx;
spin_lock_bh(&efx->filter_lock);
for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (!test_bit(filter_idx, table->used_bitmap) ||
table->spec[filter_idx].dmaq_id >=
efx->n_rx_channels)
continue;
if (efx->rx_scatter)
table->spec[filter_idx].flags |=
EFX_FILTER_FLAG_RX_SCATTER;
else
table->spec[filter_idx].flags &=
~EFX_FILTER_FLAG_RX_SCATTER;
if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
/* Pushed by efx_farch_filter_push_rx_config() */
continue;
efx_farch_filter_build(&filter, &table->spec[filter_idx]);
efx_writeo(efx, &filter,
table->offset + table->step * filter_idx);
}
}
efx_farch_filter_push_rx_config(efx);
spin_unlock_bh(&efx->filter_lock);
}
#ifdef CONFIG_RFS_ACCEL
s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
struct efx_filter_spec *gen_spec)
{
return efx_farch_filter_insert(efx, gen_spec, true);
}
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int index)
{
struct efx_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table =
&state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
if (test_bit(index, table->used_bitmap) &&
table->spec[index].priority == EFX_FILTER_PRI_HINT &&
rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
flow_id, index)) {
efx_farch_filter_table_clear_entry(efx, table, index);
return true;
}
return false;
}
#endif /* CONFIG_RFS_ACCEL */
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *ha;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
netif_addr_lock_bh(net_dev);
efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
/* Build multicast hash table */
if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
netdev_for_each_mc_addr(ha, net_dev) {
crc = ether_crc_le(ETH_ALEN, ha->addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
__set_bit_le(bit, mc_hash);
}
/* Broadcast packets go through the multicast hash filter.
* ether_crc_le() of the broadcast address is 0xbe2612ff
* so we always add bit 0xff to the mask.
*/
__set_bit_le(0xff, mc_hash);
}
netif_addr_unlock_bh(net_dev);
}
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/in.h>
#include <net/ip.h>
#include "efx.h"
#include "filter.h"
#include "io.h"
#include "nic.h"
#include "farch_regs.h"
/* "Fudge factors" - difference between programmed value and actual depth.
* Due to pipelined implementation we need to program H/W with a value that
* is larger than the hop limit we want.
*/
#define FILTER_CTL_SRCH_FUDGE_WILD 3
#define FILTER_CTL_SRCH_FUDGE_FULL 1
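/* Example: a wildcard type whose deepest filter sits at search depth 5
 * is programmed as 5 + FILTER_CTL_SRCH_FUDGE_WILD = 8; see
 * efx_filter_push_rx_config() below. */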
/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
* We also need to avoid infinite loops in efx_filter_search() when the
* table is full.
*/
#define FILTER_CTL_SRCH_MAX 200
/* Don't try very hard to find space for performance hints, as this is
* counter-productive. */
#define FILTER_CTL_SRCH_HINT_MAX 5
enum efx_filter_table_id {
EFX_FILTER_TABLE_RX_IP = 0,
EFX_FILTER_TABLE_RX_MAC,
EFX_FILTER_TABLE_RX_DEF,
EFX_FILTER_TABLE_TX_MAC,
EFX_FILTER_TABLE_COUNT,
};
enum efx_filter_index {
EFX_FILTER_INDEX_UC_DEF,
EFX_FILTER_INDEX_MC_DEF,
EFX_FILTER_SIZE_RX_DEF,
};
struct efx_filter_table {
enum efx_filter_table_id id;
u32 offset; /* address of table relative to BAR */
unsigned size; /* number of entries */
unsigned step; /* step between entries */
unsigned used; /* number currently used */
unsigned long *used_bitmap;
struct efx_filter_spec *spec;
unsigned search_depth[EFX_FILTER_TYPE_COUNT];
};
struct efx_filter_state {
spinlock_t lock;
struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
#ifdef CONFIG_RFS_ACCEL
u32 *rps_flow_id;
unsigned rps_expire_index;
#endif
};
static void efx_filter_table_clear_entry(struct efx_nic *efx,
struct efx_filter_table *table,
unsigned int filter_idx);
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 efx_filter_hash(u32 key)
{
u16 tmp;
/* First 16 rounds */
tmp = 0x1fff ^ key >> 16;
tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
tmp = tmp ^ tmp >> 9;
/* Last 16 rounds */
tmp = tmp ^ tmp << 13 ^ key;
tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
return tmp ^ tmp >> 9;
}
/* To allow for hash collisions, filter search continues at these
* increments from the first possible entry selected by the hash. */
static u16 efx_filter_increment(u32 key)
{
return key * 2 - 1;
}
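Because the increment is 2*key - 1 it is always odd, hence coprime with the power-of-two table size, so the probe walk visits every slot once before repeating. A minimal sketch (not driver code) of the sequence the search loops follow:
static void probe_sequence_sketch(u32 key, unsigned int table_size,
                                  unsigned int *out)
{
    unsigned int i = efx_filter_hash(key) & (table_size - 1);
    unsigned int incr = efx_filter_increment(key);
    unsigned int n;

    for (n = 0; n < table_size; n++) {
        out[n] = i;     /* n-th slot examined for this key */
        i = (i + incr) & (table_size - 1);
    }
}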
static enum efx_filter_table_id
efx_filter_spec_table_id(const struct efx_filter_spec *spec)
{
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
static struct efx_filter_table *
efx_filter_spec_table(struct efx_filter_state *state,
const struct efx_filter_spec *spec)
{
if (spec->type == EFX_FILTER_UNSPEC)
return NULL;
else
return &state->table[efx_filter_spec_table_id(spec)];
}
static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
memset(table->search_depth, 0, sizeof(table->search_depth));
}
static void efx_filter_push_rx_config(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table;
efx_oword_t filter_ctl;
efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
table = &state->table[EFX_FILTER_TABLE_RX_IP];
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
table->search_depth[EFX_FILTER_TCP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
table->search_depth[EFX_FILTER_TCP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
table->search_depth[EFX_FILTER_UDP_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
table->search_depth[EFX_FILTER_UDP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
table = &state->table[EFX_FILTER_TABLE_RX_MAC];
if (table->size) {
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
table->search_depth[EFX_FILTER_MAC_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
table->search_depth[EFX_FILTER_MAC_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
table = &state->table[EFX_FILTER_TABLE_RX_DEF];
if (table->size) {
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
/* There is a single bit to enable RX scatter for all
* unmatched packets. Only set it if scatter is
* enabled in both filter specs.
*/
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_SCATTER));
} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
/* We don't expose 'default' filters because unmatched
* packets always go to the queue number found in the
* RSS table. But we still need to set the RX scatter
* bit here.
*/
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
efx->rx_scatter);
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
static void efx_filter_push_tx_limits(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table;
efx_oword_t tx_cfg;
efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
table = &state->table[EFX_FILTER_TABLE_TX_MAC];
if (table->size) {
EFX_SET_OWORD_FIELD(
tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
table->search_depth[EFX_FILTER_MAC_FULL] +
FILTER_CTL_SRCH_FUDGE_FULL);
EFX_SET_OWORD_FIELD(
tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
table->search_depth[EFX_FILTER_MAC_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
__be32 host1, __be16 port1,
__be32 host2, __be16 port2)
{
spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
spec->data[2] = ntohl(host2);
}
static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
__be32 *host1, __be16 *port1,
__be32 *host2, __be16 *port2)
{
*host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
*port1 = htons(spec->data[0]);
*host2 = htonl(spec->data[2]);
*port2 = htons(spec->data[1] >> 16);
}
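The way host1 is split across data[0] and data[1] is easy to misread; a worked example with illustrative values:
/* host1 = 192.168.0.1 (0xc0a80001), port1 = 80 (0x0050),
 * host2 = 10.0.0.1 (0x0a000001), port2 = 8080 (0x1f90) give
 *
 *	data[0] = 0xc0a80001 << 16 | 0x0050          = 0x00010050
 *	data[1] = 0x1f90 << 16 | (0xc0a80001 >> 16)  = 0x1f90c0a8
 *	data[2] = 0x0a000001
 *
 * __efx_filter_get_ipv4() reverses this exactly: data[0] >> 16 restores
 * the low half of host1 and data[1] << 16 the high half.
 */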
/**
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @host: Local host address (network byte order)
* @port: Local port (network byte order)
*/
int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port)
{
__be32 host1;
__be16 port1;
EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
/* This cannot currently be combined with other filtering */
if (spec->type != EFX_FILTER_UNSPEC)
return -EPROTONOSUPPORT;
if (port == 0)
return -EINVAL;
switch (proto) {
case IPPROTO_TCP:
spec->type = EFX_FILTER_TCP_WILD;
break;
case IPPROTO_UDP:
spec->type = EFX_FILTER_UDP_WILD;
break;
default:
return -EPROTONOSUPPORT;
}
/* Filter is constructed in terms of source and destination,
* with the odd wrinkle that the ports are swapped in a UDP
* wildcard filter. We need to convert from local and remote
* (= zero for wildcard) addresses.
*/
host1 = 0;
if (proto != IPPROTO_UDP) {
port1 = 0;
} else {
port1 = port;
port = 0;
}
__efx_filter_set_ipv4(spec, host1, port1, host, port);
return 0;
}
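A minimal usage sketch of this API (hypothetical caller; the address, priority and queue are illustrative), steering local TCP port 80 traffic to RX queue 0:
static int steer_http_to_q0(struct efx_nic *efx)
{
    struct efx_filter_spec spec;
    __be32 ip_addr = htonl(0xc0a80001);     /* 192.168.0.1, illustrative */
    int rc;

    efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
    rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip_addr, htons(80));
    if (rc)
        return rc;
    /* a non-negative return is the filter ID, usable later with
     * efx_filter_remove_id_safe() */
    return efx_filter_insert_filter(efx, &spec, false);
}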
int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
u8 *proto, __be32 *host, __be16 *port)
{
__be32 host1;
__be16 port1;
switch (spec->type) {
case EFX_FILTER_TCP_WILD:
*proto = IPPROTO_TCP;
__efx_filter_get_ipv4(spec, &host1, &port1, host, port);
return 0;
case EFX_FILTER_UDP_WILD:
*proto = IPPROTO_UDP;
__efx_filter_get_ipv4(spec, &host1, port, host, &port1);
return 0;
default:
return -EINVAL;
}
}
/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @host: Local host address (network byte order)
* @port: Local port (network byte order)
* @rhost: Remote host address (network byte order)
* @rport: Remote port (network byte order)
*/
int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port,
__be32 rhost, __be16 rport)
{
EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
/* This cannot currently be combined with other filtering */
if (spec->type != EFX_FILTER_UNSPEC)
return -EPROTONOSUPPORT;
if (port == 0 || rport == 0)
return -EINVAL;
switch (proto) {
case IPPROTO_TCP:
spec->type = EFX_FILTER_TCP_FULL;
break;
case IPPROTO_UDP:
spec->type = EFX_FILTER_UDP_FULL;
break;
default:
return -EPROTONOSUPPORT;
}
__efx_filter_set_ipv4(spec, rhost, rport, host, port);
return 0;
}
int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
u8 *proto, __be32 *host, __be16 *port,
__be32 *rhost, __be16 *rport)
{
switch (spec->type) {
case EFX_FILTER_TCP_FULL:
*proto = IPPROTO_TCP;
break;
case EFX_FILTER_UDP_FULL:
*proto = IPPROTO_UDP;
break;
default:
return -EINVAL;
}
__efx_filter_get_ipv4(spec, rhost, rport, host, port);
return 0;
}
/**
* efx_filter_set_eth_local - specify local Ethernet address and optional VID
* @spec: Specification to initialise
* @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
* @addr: Local Ethernet MAC address
*/
int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr)
{
EFX_BUG_ON_PARANOID(!(spec->flags &
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
/* This cannot currently be combined with other filtering */
if (spec->type != EFX_FILTER_UNSPEC)
return -EPROTONOSUPPORT;
if (vid == EFX_FILTER_VID_UNSPEC) {
spec->type = EFX_FILTER_MAC_WILD;
spec->data[0] = 0;
} else {
spec->type = EFX_FILTER_MAC_FULL;
spec->data[0] = vid;
}
spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
spec->data[2] = addr[0] << 8 | addr[1];
return 0;
}
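The byte layout is easier to see with a concrete (illustrative) address:
/* addr = 00:0f:53:01:02:03 packs as
 *	data[2] = 0x00 << 8 | 0x0f                            = 0x000f
 *	data[1] = 0x53 << 24 | 0x01 << 16 | 0x02 << 8 | 0x03  = 0x53010203
 * which efx_filter_get_eth_local() below unpacks byte for byte.
 */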
/**
* efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
* @spec: Specification to initialise
*/
int efx_filter_set_uc_def(struct efx_filter_spec *spec)
{
EFX_BUG_ON_PARANOID(!(spec->flags &
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
if (spec->type != EFX_FILTER_UNSPEC)
return -EINVAL;
spec->type = EFX_FILTER_UC_DEF;
memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
return 0;
}
/**
* efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
* @spec: Specification to initialise
*/
int efx_filter_set_mc_def(struct efx_filter_spec *spec)
{
EFX_BUG_ON_PARANOID(!(spec->flags &
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
if (spec->type != EFX_FILTER_UNSPEC)
return -EINVAL;
spec->type = EFX_FILTER_MC_DEF;
memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
return 0;
}
static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
struct efx_filter_spec *spec = &table->spec[filter_idx];
enum efx_filter_flags flags = 0;
/* If there's only one channel then disable RSS for non-VF
* traffic, thereby allowing VFs to use RSS when the PF can't.
*/
if (efx->n_rx_channels > 1)
flags |= EFX_FILTER_FLAG_RX_RSS;
if (efx->rx_scatter)
flags |= EFX_FILTER_FLAG_RX_SCATTER;
efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
spec->type = EFX_FILTER_UC_DEF + filter_idx;
table->used_bitmap[0] |= 1 << filter_idx;
}
int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
u16 *vid, u8 *addr)
{
switch (spec->type) {
case EFX_FILTER_MAC_WILD:
*vid = EFX_FILTER_VID_UNSPEC;
break;
case EFX_FILTER_MAC_FULL:
*vid = spec->data[0];
break;
default:
return -EINVAL;
}
addr[0] = spec->data[2] >> 8;
addr[1] = spec->data[2];
addr[2] = spec->data[1] >> 24;
addr[3] = spec->data[1] >> 16;
addr[4] = spec->data[1] >> 8;
addr[5] = spec->data[1];
return 0;
}
/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
u32 data3;
switch (efx_filter_spec_table_id(spec)) {
case EFX_FILTER_TABLE_RX_IP: {
bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
spec->type == EFX_FILTER_UDP_WILD);
EFX_POPULATE_OWORD_7(
*filter,
FRF_BZ_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
FRF_BZ_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
FRF_BZ_TCP_UDP, is_udp,
FRF_BZ_RXQ_ID, spec->dmaq_id,
EFX_DWORD_2, spec->data[2],
EFX_DWORD_1, spec->data[1],
EFX_DWORD_0, spec->data[0]);
data3 = is_udp;
break;
}
case EFX_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_7(
*filter,
FRF_CZ_RMFT_RSS_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
FRF_CZ_RMFT_SCATTER_EN,
!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
data3 = is_wild;
break;
}
case EFX_FILTER_TABLE_TX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_5(*filter,
FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
data3 = is_wild | spec->dmaq_id << 1;
break;
}
default:
BUG();
}
return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
static bool efx_filter_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right)
{
if (left->type != right->type ||
memcmp(left->data, right->data, sizeof(left->data)))
return false;
if (left->flags & EFX_FILTER_FLAG_TX &&
left->dmaq_id != right->dmaq_id)
return false;
return true;
}
/*
* Construct/deconstruct external filter IDs. At least the RX filter
* IDs must be ordered by matching priority, for RX NFC semantics.
*
* Deconstruction needs to be robust against invalid IDs so that
* efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
* accept user-provided IDs.
*/
#define EFX_FILTER_MATCH_PRI_COUNT 5
static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
[EFX_FILTER_TCP_FULL] = 0,
[EFX_FILTER_UDP_FULL] = 0,
[EFX_FILTER_TCP_WILD] = 1,
[EFX_FILTER_UDP_WILD] = 1,
[EFX_FILTER_MAC_FULL] = 2,
[EFX_FILTER_MAC_WILD] = 3,
[EFX_FILTER_UC_DEF] = 4,
[EFX_FILTER_MC_DEF] = 4,
};
static const enum efx_filter_table_id efx_filter_range_table[] = {
EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
EFX_FILTER_TABLE_RX_IP,
EFX_FILTER_TABLE_RX_MAC,
EFX_FILTER_TABLE_RX_MAC,
EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
EFX_FILTER_TABLE_COUNT, /* invalid */
EFX_FILTER_TABLE_TX_MAC,
EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
};
#define EFX_FILTER_INDEX_WIDTH 13
#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
static inline u32
efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
{
unsigned int range;
range = efx_filter_type_match_pri[spec->type];
if (!(spec->flags & EFX_FILTER_FLAG_RX))
range += EFX_FILTER_MATCH_PRI_COUNT;
return range << EFX_FILTER_INDEX_WIDTH | index;
}
static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
{
unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
if (range < ARRAY_SIZE(efx_filter_range_table))
return efx_filter_range_table[range];
else
return EFX_FILTER_TABLE_COUNT; /* invalid */
}
static inline unsigned int efx_filter_id_index(u32 id)
{
return id & EFX_FILTER_INDEX_MASK;
}
static inline u8 efx_filter_id_flags(u32 id)
{
unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
if (range < EFX_FILTER_MATCH_PRI_COUNT)
return EFX_FILTER_FLAG_RX;
else
return EFX_FILTER_FLAG_TX;
}
u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
enum efx_filter_table_id table_id;
do {
table_id = efx_filter_range_table[range];
if (state->table[table_id].size != 0)
return range << EFX_FILTER_INDEX_WIDTH |
state->table[table_id].size;
} while (range--);
return 0;
}
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
* @replace_equal: Flag for whether the specified filter may replace an
* existing filter with equal priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
*
* If an existing filter has equal match values to the new filter
* spec, then the new filter might replace it, depending on the
* relative priorities. If the existing filter has lower priority, or
* if @replace_equal is set and it has equal priority, then it is
* replaced. Otherwise the function fails, returning -%EPERM if
* the existing filter has higher priority or -%EEXIST if it has
* equal priority.
*/
s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace_equal)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
efx_oword_t filter;
int rep_index, ins_index;
unsigned int depth = 0;
int rc;
if (!table || table->size == 0)
return -EINVAL;
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_depth=%d", __func__, spec->type,
table->search_depth[spec->type]);
if (table->id == EFX_FILTER_TABLE_RX_DEF) {
/* One filter spec per type */
BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
rep_index = spec->type - EFX_FILTER_UC_DEF;
ins_index = rep_index;
spin_lock_bh(&state->lock);
} else {
/* Search concurrently for
* (1) a filter to be replaced (rep_index): any filter
* with the same match values, up to the current
* search depth for this type, and
* (2) the insertion point (ins_index): (1) or any
* free slot before it or up to the maximum search
* depth for this priority
* We fail if we cannot find (2).
*
* We can stop once either
* (a) we find (1), in which case we have definitely
* found (2) as well; or
* (b) we have searched exhaustively for (1), and have
* either found (2) or searched exhaustively for it
*/
u32 key = efx_filter_build(&filter, spec);
unsigned int hash = efx_filter_hash(key);
unsigned int incr = efx_filter_increment(key);
unsigned int max_rep_depth = table->search_depth[spec->type];
unsigned int max_ins_depth =
spec->priority <= EFX_FILTER_PRI_HINT ?
FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
unsigned int i = hash & (table->size - 1);
ins_index = -1;
depth = 1;
spin_lock_bh(&state->lock);
for (;;) {
if (!test_bit(i, table->used_bitmap)) {
if (ins_index < 0)
ins_index = i;
} else if (efx_filter_equal(spec, &table->spec[i])) {
/* Case (a) */
if (ins_index < 0)
ins_index = i;
rep_index = i;
break;
}
if (depth >= max_rep_depth &&
(ins_index >= 0 || depth >= max_ins_depth)) {
/* Case (b) */
if (ins_index < 0) {
rc = -EBUSY;
goto out;
}
rep_index = -1;
break;
}
i = (i + incr) & (table->size - 1);
++depth;
}
}
/* If we found a filter to be replaced, check whether we
* should do so
*/
if (rep_index >= 0) {
struct efx_filter_spec *saved_spec = &table->spec[rep_index];
if (spec->priority == saved_spec->priority && !replace_equal) {
rc = -EEXIST;
goto out;
}
if (spec->priority < saved_spec->priority) {
rc = -EPERM;
goto out;
}
}
/* Insert the filter */
if (ins_index != rep_index) {
__set_bit(ins_index, table->used_bitmap);
++table->used;
}
table->spec[ins_index] = *spec;
if (table->id == EFX_FILTER_TABLE_RX_DEF) {
efx_filter_push_rx_config(efx);
} else {
if (table->search_depth[spec->type] < depth) {
table->search_depth[spec->type] = depth;
if (spec->flags & EFX_FILTER_FLAG_TX)
efx_filter_push_tx_limits(efx);
else
efx_filter_push_rx_config(efx);
}
efx_writeo(efx, &filter,
table->offset + table->step * ins_index);
/* If we were able to replace a filter by inserting
* at a lower depth, clear the replaced filter
*/
if (ins_index != rep_index && rep_index >= 0)
efx_filter_table_clear_entry(efx, table, rep_index);
}
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec->type, ins_index, spec->dmaq_id);
rc = efx_filter_make_id(spec, ins_index);
out:
spin_unlock_bh(&state->lock);
return rc;
}
static void efx_filter_table_clear_entry(struct efx_nic *efx,
struct efx_filter_table *table,
unsigned int filter_idx)
{
static efx_oword_t filter; /* static, so zero-initialised; overwrites the HW entry */
if (table->id == EFX_FILTER_TABLE_RX_DEF) {
/* RX default filters must always exist */
efx_filter_reset_rx_def(efx, filter_idx);
efx_filter_push_rx_config(efx);
} else if (test_bit(filter_idx, table->used_bitmap)) {
__clear_bit(filter_idx, table->used_bitmap);
--table->used;
memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
efx_writeo(efx, &filter,
table->offset + table->step * filter_idx);
}
}
/**
* efx_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
int efx_filter_remove_id_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
unsigned int filter_idx;
struct efx_filter_spec *spec;
u8 filter_flags;
int rc;
table_id = efx_filter_id_table_id(filter_id);
if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
return -ENOENT;
table = &state->table[table_id];
filter_idx = efx_filter_id_index(filter_id);
if (filter_idx >= table->size)
return -ENOENT;
spec = &table->spec[filter_idx];
filter_flags = efx_filter_id_flags(filter_id);
spin_lock_bh(&state->lock);
if (test_bit(filter_idx, table->used_bitmap) &&
spec->priority == priority) {
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
efx_filter_table_reset_search_depth(table);
rc = 0;
} else {
rc = -ENOENT;
}
spin_unlock_bh(&state->lock);
return rc;
}
/**
* efx_filter_get_filter_safe - retrieve a filter by ID, carefully
* @efx: NIC from which to remove the filter
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
* @spec: Buffer in which to store filter specification
*
* This function will range-check @filter_id, so it is safe to call
* with a value passed from userland.
*/
int efx_filter_get_filter_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *spec_buf)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
struct efx_filter_spec *spec;
unsigned int filter_idx;
u8 filter_flags;
int rc;
table_id = efx_filter_id_table_id(filter_id);
if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
return -ENOENT;
table = &state->table[table_id];
filter_idx = efx_filter_id_index(filter_id);
if (filter_idx >= table->size)
return -ENOENT;
spec = &table->spec[filter_idx];
filter_flags = efx_filter_id_flags(filter_id);
spin_lock_bh(&state->lock);
if (test_bit(filter_idx, table->used_bitmap) &&
spec->priority == priority) {
*spec_buf = *spec;
rc = 0;
} else {
rc = -ENOENT;
}
spin_unlock_bh(&state->lock);
return rc;
}
static void efx_filter_table_clear(struct efx_nic *efx,
enum efx_filter_table_id table_id,
enum efx_filter_priority priority)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[table_id];
unsigned int filter_idx;
spin_lock_bh(&state->lock);
for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
if (table->spec[filter_idx].priority <= priority)
efx_filter_table_clear_entry(efx, table, filter_idx);
if (table->used == 0)
efx_filter_table_reset_search_depth(table);
spin_unlock_bh(&state->lock);
}
/**
* efx_filter_clear_rx - remove RX filters by priority
* @efx: NIC from which to remove the filters
* @priority: Maximum priority to remove
*/
void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
{
efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}
u32 efx_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
unsigned int filter_idx;
u32 count = 0;
spin_lock_bh(&state->lock);
for (table_id = EFX_FILTER_TABLE_RX_IP;
table_id <= EFX_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (test_bit(filter_idx, table->used_bitmap) &&
table->spec[filter_idx].priority == priority)
++count;
}
}
spin_unlock_bh(&state->lock);
return count;
}
s32 efx_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
unsigned int filter_idx;
s32 count = 0;
spin_lock_bh(&state->lock);
for (table_id = EFX_FILTER_TABLE_RX_IP;
table_id <= EFX_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (test_bit(filter_idx, table->used_bitmap) &&
table->spec[filter_idx].priority == priority) {
if (count == size) {
count = -EMSGSIZE;
goto out;
}
buf[count++] = efx_filter_make_id(
&table->spec[filter_idx], filter_idx);
}
}
}
out:
spin_unlock_bh(&state->lock);
return count;
}
/* Restore filter state after reset */
void efx_restore_filters(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
efx_oword_t filter;
unsigned int filter_idx;
spin_lock_bh(&state->lock);
for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
/* Check whether this is a regular register table */
if (table->step == 0)
continue;
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (!test_bit(filter_idx, table->used_bitmap))
continue;
efx_filter_build(&filter, &table->spec[filter_idx]);
efx_writeo(efx, &filter,
table->offset + table->step * filter_idx);
}
}
efx_filter_push_rx_config(efx);
efx_filter_push_tx_limits(efx);
spin_unlock_bh(&state->lock);
}
int efx_probe_filters(struct efx_nic *efx)
{
struct efx_filter_state *state;
struct efx_filter_table *table;
unsigned table_id;
state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
efx->filter_state = state;
spin_lock_init(&state->lock);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
#ifdef CONFIG_RFS_ACCEL
state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
sizeof(*state->rps_flow_id),
GFP_KERNEL);
if (!state->rps_flow_id)
goto fail;
#endif
table = &state->table[EFX_FILTER_TABLE_RX_IP];
table->id = EFX_FILTER_TABLE_RX_IP;
table->offset = FR_BZ_RX_FILTER_TBL0;
table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
table->step = FR_BZ_RX_FILTER_TBL0_STEP;
}
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
table = &state->table[EFX_FILTER_TABLE_RX_MAC];
table->id = EFX_FILTER_TABLE_RX_MAC;
table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
table = &state->table[EFX_FILTER_TABLE_RX_DEF];
table->id = EFX_FILTER_TABLE_RX_DEF;
table->size = EFX_FILTER_SIZE_RX_DEF;
table = &state->table[EFX_FILTER_TABLE_TX_MAC];
table->id = EFX_FILTER_TABLE_TX_MAC;
table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
}
for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
table = &state->table[table_id];
if (table->size == 0)
continue;
table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
sizeof(unsigned long),
GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
table->spec = vzalloc(table->size * sizeof(*table->spec));
if (!table->spec)
goto fail;
}
if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
/* RX default filters must always exist */
unsigned i;
for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
efx_filter_reset_rx_def(efx, i);
}
efx_filter_push_rx_config(efx);
return 0;
fail:
efx_remove_filters(efx);
return -ENOMEM;
}
void efx_remove_filters(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
kfree(state->table[table_id].used_bitmap);
vfree(state->table[table_id].spec);
}
#ifdef CONFIG_RFS_ACCEL
kfree(state->rps_flow_id);
#endif
kfree(state);
}
/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_filter_update_rx_scatter(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
efx_oword_t filter;
unsigned int filter_idx;
spin_lock_bh(&state->lock);
for (table_id = EFX_FILTER_TABLE_RX_IP;
table_id <= EFX_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (!test_bit(filter_idx, table->used_bitmap) ||
table->spec[filter_idx].dmaq_id >=
efx->n_rx_channels)
continue;
if (efx->rx_scatter)
table->spec[filter_idx].flags |=
EFX_FILTER_FLAG_RX_SCATTER;
else
table->spec[filter_idx].flags &=
~EFX_FILTER_FLAG_RX_SCATTER;
if (table_id == EFX_FILTER_TABLE_RX_DEF)
/* Pushed by efx_filter_push_rx_config() */
continue;
efx_filter_build(&filter, &table->spec[filter_idx]);
efx_writeo(efx, &filter,
table->offset + table->step * filter_idx);
}
}
efx_filter_push_rx_config(efx);
spin_unlock_bh(&state->lock);
}
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_spec spec;
const struct iphdr *ip;
const __be16 *ports;
int nhoff;
int rc;
nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_8021Q)) {
EFX_BUG_ON_PARANOID(skb_headlen(skb) <
nhoff + sizeof(struct vlan_hdr));
if (((const struct vlan_hdr *)(skb->data + nhoff))->
h_vlan_encapsulated_proto != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
/* This is IP over 802.1q VLAN. We can't filter on the
* IP 5-tuple and the vlan together, so just strip the
* vlan header and filter on the IP part.
*/
nhoff += sizeof(struct vlan_hdr);
} else if (skb->protocol != htons(ETH_P_IP)) {
return -EPROTONOSUPPORT;
}
/* RFS must validate the IP header length before calling us */
EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
ip = (const struct iphdr *)(skb->data + nhoff);
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
ip->daddr, ports[1], ip->saddr, ports[0]);
if (rc)
return rc;
rc = efx_filter_insert_filter(efx, &spec, true);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
state->rps_flow_id[rc] = flow_id;
channel = efx_get_channel(efx, skb_get_rx_queue(skb));
++channel->rfs_filters_added;
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
&ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
rxq_index, flow_id, rc);
return rc;
}
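This function has the signature of the stack's accelerated-RFS hook; a sketch of how it would be wired up (the ops struct here is illustrative, the real assignment lives in efx.c, outside this excerpt):
static const struct net_device_ops example_netdev_ops = {
    .ndo_rx_flow_steer = efx_filter_rfs,
};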
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
unsigned mask = table->size - 1;
unsigned index;
unsigned stop;
if (!spin_trylock_bh(&state->lock))
return false;
index = state->rps_expire_index;
stop = (index + quota) & mask;
while (index != stop) {
if (test_bit(index, table->used_bitmap) &&
table->spec[index].priority == EFX_FILTER_PRI_HINT &&
rps_may_expire_flow(efx->net_dev,
table->spec[index].dmaq_id,
state->rps_flow_id[index], index)) {
netif_info(efx, rx_status, efx->net_dev,
"expiring filter %d [flow %u]\n",
index, state->rps_flow_id[index]);
efx_filter_table_clear_entry(efx, table, index);
}
index = (index + 1) & mask;
}
state->rps_expire_index = stop;
if (table->used == 0)
efx_filter_table_reset_search_depth(table);
spin_unlock_bh(&state->lock);
return true;
}
#endif /* CONFIG_RFS_ACCEL */
...@@ -11,32 +11,49 @@ ...@@ -11,32 +11,49 @@
#define EFX_FILTER_H #define EFX_FILTER_H
#include <linux/types.h> #include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>
/** /**
* enum efx_filter_type - type of hardware filter * enum efx_filter_match_flags - Flags for hardware filter match type
* @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
* @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port) * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
* @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
* @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port) * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
* @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
* @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
* @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
* @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
* @EFX_FILTER_UNSPEC: Match type is unspecified * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
* @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
* @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
* Used for RX default unicast and multicast/broadcast filters.
* *
* Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types. * Only some combinations are supported, depending on NIC type:
*
* - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
* local 2-tuple (only implemented for Falcon B0)
*
* - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
* or local 2-tuple, or local MAC with or without outer VID, and RX
* default filters
*
* - Huntington supports filter matching controlled by firmware, potentially
* using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
* with or without outer and inner VID
*/ */
enum efx_filter_type { enum efx_filter_match_flags {
EFX_FILTER_TCP_FULL = 0, EFX_FILTER_MATCH_REM_HOST = 0x0001,
EFX_FILTER_TCP_WILD, EFX_FILTER_MATCH_LOC_HOST = 0x0002,
EFX_FILTER_UDP_FULL, EFX_FILTER_MATCH_REM_MAC = 0x0004,
EFX_FILTER_UDP_WILD, EFX_FILTER_MATCH_REM_PORT = 0x0008,
EFX_FILTER_MAC_FULL = 4, EFX_FILTER_MATCH_LOC_MAC = 0x0010,
EFX_FILTER_MAC_WILD, EFX_FILTER_MATCH_LOC_PORT = 0x0020,
EFX_FILTER_UC_DEF = 8, EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
EFX_FILTER_MC_DEF, EFX_FILTER_MATCH_INNER_VID = 0x0080,
EFX_FILTER_TYPE_COUNT, /* number of specific types */ EFX_FILTER_MATCH_OUTER_VID = 0x0100,
EFX_FILTER_UNSPEC = 0xf, EFX_FILTER_MATCH_IP_PROTO = 0x0200,
EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
}; };
/** /**
...@@ -61,37 +78,75 @@ enum efx_filter_priority { ...@@ -61,37 +78,75 @@ enum efx_filter_priority {
* according to the indirection table. * according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue. * queue.
* @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
* network stack. The filter must have a priority of
* %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
* request with priority %EFX_FILTER_PRI_MANUAL, and a removal
* request with priority %EFX_FILTER_PRI_MANUAL will reset the
* steering (but not remove the filter).
* @EFX_FILTER_FLAG_RX: Filter is for RX * @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX * @EFX_FILTER_FLAG_TX: Filter is for TX
*/ */
enum efx_filter_flags { enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01, EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02, EFX_FILTER_FLAG_RX_SCATTER = 0x02,
EFX_FILTER_FLAG_RX_STACK = 0x04,
EFX_FILTER_FLAG_RX = 0x08, EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10, EFX_FILTER_FLAG_TX = 0x10,
}; };
/**
 * struct efx_filter_spec - specification for a hardware filter
 * @type: Type of match to be performed, from &enum efx_filter_type
 * @priority: Priority of the filter, from &enum efx_filter_priority
 * @flags: Miscellaneous flags, from &enum efx_filter_flags
 * @dmaq_id: Source/target queue index
 * @data: Match data (type-dependent)
 *
 * Use the efx_filter_set_*() functions to initialise the @type and
 * @data fields.
 *
 * The @priority field is used by software to determine whether a new
 * filter may replace an old one.  The hardware priority of a filter
 * depends on the filter type.
 */
struct efx_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};

/**
 * struct efx_filter_spec - specification for a hardware filter
 * @match_flags: Match type flags, from &enum efx_filter_match_flags
 * @priority: Priority of the filter, from &enum efx_filter_priority
 * @flags: Miscellaneous flags, from &enum efx_filter_flags
 * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
 * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
 *	an RX drop filter
 * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
 * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
 * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
 *	%EFX_FILTER_MATCH_LOC_MAC_IG is set
 * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
 * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
 * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
 *	is set
 * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
 * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
 * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
 * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
 *
 * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
 * used to initialise the structure.  The efx_filter_set_*() functions
 * may then be used to set @rss_context, @match_flags and related
 * fields.
 *
 * The @priority field is used by software to determine whether a new
 * filter may replace an old one.  The hardware priority of a filter
 * depends on which fields are matched.
 */
struct efx_filter_spec {
	u32	match_flags:12;
	u32	priority:2;
	u32	flags:6;
	u32	dmaq_id:12;
	u32	rss_context;
	__be16	outer_vid __aligned(4); /* allow jhash2() of match values */
	__be16	inner_vid;
	u8	loc_mac[ETH_ALEN];
	u8	rem_mac[ETH_ALEN];
	__be16	ether_type;
	u8	ip_proto;
	__be32	loc_host[4];
	__be32	rem_host[4];
	__be16	loc_port;
	__be16	rem_port;
	/* total 64 bytes */
};

enum {
	EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
	EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
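The `__aligned(4)` annotation on @outer_vid is what the inline comment is pointing at: everything from @outer_vid to the end of the struct is a whole number of 32-bit words, so the match values can be hashed as a u32 array. A minimal sketch of that idea (not part of this patch; the helper name and seed value are invented):

/* Hypothetical helper: hash the match-value region of a filter spec.
 * Relies on @outer_vid being 4-byte aligned so the region is a whole
 * number of u32s, as the struct comment above intends.
 */
#include <linux/jhash.h>
#include <linux/stddef.h>

static u32 example_filter_spec_hash(const struct efx_filter_spec *spec)
{
	const u32 *key = (const u32 *)&spec->outer_vid;
	u32 n_words = (sizeof(*spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4;

	return jhash2(key, n_words, 0 /* assumed seed */);
}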
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
...@@ -99,39 +154,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
				      enum efx_filter_priority priority,
				      enum efx_filter_flags flags,
				      unsigned rxq_id)
{
	memset(spec, 0, sizeof(*spec));
	spec->priority = priority;
	spec->flags = EFX_FILTER_FLAG_RX | flags;
	spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
	spec->dmaq_id = rxq_id;
}

static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
				      unsigned txq_id)
{
	memset(spec, 0, sizeof(*spec));
	spec->priority = EFX_FILTER_PRI_REQUIRED;
	spec->flags = EFX_FILTER_FLAG_TX;
	spec->dmaq_id = txq_id;
}
extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
				     __be32 host, __be16 port);
extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
				     u8 *proto, __be32 *host, __be16 *port);
extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
				    __be32 host, __be16 port,
				    __be32 rhost, __be16 rport);
extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
				    u8 *proto, __be32 *host, __be16 *port,
				    __be32 *rhost, __be16 *rport);
extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
				    u16 vid, const u8 *addr);
extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
				    u16 *vid, u8 *addr);
extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);

/**
 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
 * @spec: Specification to initialise
 * @proto: Transport layer protocol number
 * @host: Local host address (network byte order)
 * @port: Local port (network byte order)
 */
static inline int
efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
			  __be32 host, __be16 port)
{
	spec->match_flags |=
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
	spec->ether_type = htons(ETH_P_IP);
	spec->ip_proto = proto;
	spec->loc_host[0] = host;
	spec->loc_port = port;
	return 0;
}
/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
* @lhost: Local host address (network byte order)
* @lport: Local port (network byte order)
* @rhost: Remote host address (network byte order)
* @rport: Remote port (network byte order)
*/
static inline int
efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 lhost, __be16 lport,
__be32 rhost, __be16 rport)
{
spec->match_flags |=
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
spec->ether_type = htons(ETH_P_IP);
spec->ip_proto = proto;
spec->loc_host[0] = lhost;
spec->loc_port = lport;
spec->rem_host[0] = rhost;
spec->rem_port = rport;
return 0;
}
enum {
	EFX_FILTER_VID_UNSPEC = 0xffff,
};
/**
* efx_filter_set_eth_local - specify local Ethernet address and/or VID
* @spec: Specification to initialise
* @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
* @addr: Local Ethernet MAC address, or %NULL
*/
static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr)
{
if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
return -EINVAL;
if (vid != EFX_FILTER_VID_UNSPEC) {
spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
spec->outer_vid = htons(vid);
}
if (addr != NULL) {
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
memcpy(spec->loc_mac, addr, ETH_ALEN);
}
return 0;
}
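A small usage sketch for the helper above (invented values, not from the patch): match one local MAC address on outer VLAN 10.

/* Illustrative only: build a MAC+VID match.  The locally-administered
 * MAC address here is an example value.
 */
static int example_mac_vid_filter(struct efx_filter_spec *spec)
{
	static const u8 mac[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	return efx_filter_set_eth_local(spec, 10, mac);
}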
/**
* efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
* @spec: Specification to initialise
*/
static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
{
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
return 0;
}
/**
* efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
* @spec: Specification to initialise
*/
static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
{
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
spec->loc_mac[0] = 1;
return 0;
}
#endif /* EFX_FILTER_H */
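Putting the header together, a minimal usage sketch (not from this patch; the wrapper name, address and error handling are invented): initialise a spec, set an IPv4 local match, then hand it to the per-NIC insert hook introduced later in this series.

/* Sketch: steer local TCP traffic to 192.0.2.1:80 onto one RX queue.
 * filter_insert returns the new filter ID on success.
 */
static int example_steer_http(struct efx_nic *efx, unsigned int rxq_id)
{
	struct efx_filter_spec spec;
	int rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_id);
	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
				       htonl(0xc0000201) /* 192.0.2.1 */,
				       htons(80));
	if (rc)
		return rc;

	rc = efx->type->filter_insert(efx, &spec, false);
	return rc < 0 ? rc : 0;
}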
...@@ -128,6 +128,60 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
	EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
	EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1)
#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
_name2, _value2) \
EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2)
#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3) \
EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3)
#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4) \
EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4)
#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5) \
EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5)
#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5, \
_name6, _value6) \
EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5, \
MC_CMD_ ## _name6, _value6)
#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
_name2, _value2, _name3, _value3, \
_name4, _value4, _name5, _value5, \
_name6, _value6, _name7, _value7) \
EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
MC_CMD_ ## _name1, _value1, \
MC_CMD_ ## _name2, _value2, \
MC_CMD_ ## _name3, _value3, \
MC_CMD_ ## _name4, _value4, \
MC_CMD_ ## _name5, _value5, \
MC_CMD_ ## _name6, _value6, \
MC_CMD_ ## _name7, _value7)
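These helpers simply prepend MC_CMD_ to each field name and forward to the matching EFX_POPULATE_DWORD_n. A hedged sketch of a call site (the surrounding function is invented; the SET_MAC field names are the ones used by efx_mcdi_set_mac below):

/* Illustrative only: fill two bitfields of the SET_MAC_IN_REJECT dword.
 * Expands to EFX_POPULATE_DWORD_2(..., MC_CMD_SET_MAC_IN_REJECT_UNCST, 1,
 * MC_CMD_SET_MAC_IN_REJECT_BRDCST, 0).
 */
static void example_build_reject_field(void)
{
	MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);

	MCDI_POPULATE_DWORD_2(cmdbytes, SET_MAC_IN_REJECT,
			      SET_MAC_IN_REJECT_UNCST, 1,
			      SET_MAC_IN_REJECT_BRDCST, 0);
}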
#define MCDI_SET_QWORD(_buf, _field, _value) \
	do { \
		EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
......
...@@ -861,7 +861,7 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
int efx_mcdi_set_mac(struct efx_nic *efx)
{
	u32 fcntl;
	MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);

	BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
...@@ -873,12 +873,9 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
			EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);

	/* The MCDI command provides for controlling accept/reject
	 * of broadcast packets too, but the driver doesn't currently
	 * expose this. */
	reject = (efx->promiscuous) ? 0 :
		(1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);

	/* Set simple MAC filter for Siena */
	MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
			      SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);

	switch (efx->wanted_fc) {
	case EFX_FC_RX | EFX_FC_TX:
...@@ -926,21 +923,19 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
	int rc;
	int period = enable ? 1000 : 0;

	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);

	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
	MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
			      MAC_STATS_IN_DMA, !!enable,
			      MAC_STATS_IN_CLEAR, clear,
			      MAC_STATS_IN_PERIODIC_CHANGE, 1,
			      MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
			      MAC_STATS_IN_PERIODIC_CLEAR, 0,
			      MAC_STATS_IN_PERIODIC_NOEVENT, 1,
			      MAC_STATS_IN_PERIOD_MS, period);
	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);

	rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
......
...@@ -22,9 +22,10 @@
#include "mcdi.h"
#include "mcdi_pcol.h"

#define FALCON_SPI_VERIFY_BUF_LEN 16

struct efx_mtd_partition {
	struct list_head node;
	struct mtd_info mtd;
	union {
		struct {
...@@ -32,8 +33,12 @@ struct efx_mtd_partition {
			u8 nvram_type;
			u16 fw_subtype;
		} mcdi;
		struct {
			const struct falcon_spi_device *spi;
			size_t offset;
		} falcon;
	};
	const char *dev_type_name;
	const char *type_name;
	char name[IFNAMSIZ + 20];
};
...@@ -47,21 +52,6 @@ struct efx_mtd_ops {
	int (*sync)(struct mtd_info *mtd);
};
struct efx_mtd {
struct list_head node;
struct efx_nic *efx;
const struct efx_spi_device *spi;
const char *name;
const struct efx_mtd_ops *ops;
size_t n_parts;
struct efx_mtd_partition part[0];
};
#define efx_for_each_partition(part, efx_mtd) \
for ((part) = &(efx_mtd)->part[0]; \
(part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
(part)++)
#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)
...@@ -71,11 +61,10 @@ static int siena_mtd_probe(struct efx_nic *efx);
/* SPI utilities */

static int
falcon_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
	const struct falcon_spi_device *spi = part->falcon.spi;
	struct efx_nic *efx = part->mtd.priv;
	u8 status;
	int rc, i;
...@@ -93,12 +82,13 @@ efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n",
	       part->name, part->dev_type_name);
	return -ETIMEDOUT;
}
static int
falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
...@@ -133,14 +123,13 @@ efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
}

static int
falcon_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
	const struct falcon_spi_device *spi = part->falcon.spi;
	struct efx_nic *efx = part->mtd.priv;
	unsigned pos, block_len;
	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
...@@ -149,7 +138,7 @@ efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = falcon_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
...@@ -159,7 +148,7 @@ efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
			    NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_slow_wait(part, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{ {
struct efx_mtd *efx_mtd = mtd->priv; struct efx_nic *efx = mtd->priv;
int rc; int rc;
rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len); rc = efx->mtd_ops->erase(mtd, erase->addr, erase->len);
if (rc == 0) { if (rc == 0) {
erase->state = MTD_ERASE_DONE; erase->state = MTD_ERASE_DONE;
} else { } else {
...@@ -202,13 +191,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) ...@@ -202,13 +191,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
static void efx_mtd_sync(struct mtd_info *mtd) static void efx_mtd_sync(struct mtd_info *mtd)
{ {
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv; struct efx_nic *efx = mtd->priv;
int rc; int rc;
rc = efx_mtd->ops->sync(mtd); rc = efx->mtd_ops->sync(mtd);
if (rc) if (rc)
pr_err("%s: %s sync failed (%d)\n", pr_err("%s: %s sync failed (%d)\n",
part->name, efx_mtd->name, rc); part->name, part->dev_type_name, rc);
} }
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
...@@ -222,86 +211,84 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
		ssleep(1);
	}
	WARN_ON(rc);
	list_del(&part->node);
}
static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		efx_mtd_remove_partition(part);
	list_del(&efx_mtd->node);
	kfree(efx_mtd);
}

static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
			snprintf(part->name, sizeof(part->name),
				 "%s %s:%02x", efx_mtd->efx->name,
				 part->type_name, part->mcdi.fw_subtype);
		else
			snprintf(part->name, sizeof(part->name),
				 "%s %s", efx_mtd->efx->name,
				 part->type_name);
}

static void efx_mtd_rename_partition(struct efx_mtd_partition *part)
{
	struct efx_nic *efx = part->mtd.priv;

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		snprintf(part->name, sizeof(part->name), "%s %s:%02x",
			 efx->name, part->type_name, part->mcdi.fw_subtype);
	else
		snprintf(part->name, sizeof(part->name), "%s %s",
			 efx->name, part->type_name);
}

static int efx_mtd_add(struct efx_nic *efx,
		       struct efx_mtd_partition *parts, size_t n_parts)
{
	struct efx_mtd_partition *part;
	size_t i;

	for (i = 0; i < n_parts; i++) {
		part = &parts[i];

		part->mtd.writesize = 1;

		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx;
		part->mtd.name = part->name;
		part->mtd._erase = efx_mtd_erase;
		part->mtd._read = efx->mtd_ops->read;
		part->mtd._write = efx->mtd_ops->write;
		part->mtd._sync = efx_mtd_sync;

		efx_mtd_rename_partition(part);

		if (mtd_device_register(&part->mtd, NULL, 0))
			goto fail;

		/* Add to list in order - efx_mtd_remove() depends on this */
		list_add_tail(&part->node, &efx->mtd_list);
	}

	return 0;

fail:
	while (i--)
		efx_mtd_remove_partition(&parts[i]);
	/* Failure is unlikely here, but probably means we're out of memory */
	return -ENOMEM;
}
void efx_mtd_remove(struct efx_nic *efx)
{
	struct efx_mtd_partition *parts, *part, *next;

	WARN_ON(efx_dev_registered(efx));

	if (list_empty(&efx->mtd_list))
		return;

	parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
				 node);

	list_for_each_entry_safe(part, next, &efx->mtd_list, node)
		efx_mtd_remove_partition(part);

	kfree(parts);
}

void efx_mtd_rename(struct efx_nic *efx)
{
	struct efx_mtd_partition *part;

	ASSERT_RTNL();

	list_for_each_entry(part, &efx->mtd_list, node)
		efx_mtd_rename_partition(part);
}
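Why freeing only the first list entry is enough in efx_mtd_remove() above: all partitions for one NIC come from a single kcalloc() array and are linked in array order, so the first entry on @mtd_list is always the base pointer of the allocation. Sketched below as an illustration of that invariant (not new driver code):

/* Restating the layout efx_mtd_remove() relies on: one contiguous
 * allocation, linked into the list in array order by efx_mtd_add().
 */
static void example_layout(struct efx_nic *efx, size_t n_parts)
{
	struct efx_mtd_partition *parts =
		kcalloc(n_parts, sizeof(*parts), GFP_KERNEL);
	size_t i;

	if (!parts)
		return;
	for (i = 0; i < n_parts; i++)
		list_add_tail(&parts[i].node, &efx->mtd_list);

	/* list_first_entry(&efx->mtd_list, ...) == &parts[0], so one
	 * kfree() of that entry releases every partition at once.
	 */
}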
int efx_mtd_probe(struct efx_nic *efx)
...@@ -318,17 +305,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_read(efx, part->falcon.spi, part->falcon.offset + start,
			     len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
...@@ -336,15 +321,14 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_erase(part, part->falcon.offset + start, len);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
...@@ -353,17 +337,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_write(efx, part->falcon.spi,
			      part->falcon.offset + start, len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
...@@ -371,13 +353,12 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
static int falcon_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_slow_wait(part, true);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
...@@ -392,66 +373,50 @@ static const struct efx_mtd_ops falcon_mtd_ops = {
static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct efx_mtd_partition *parts;
	struct falcon_spi_device *spi;
	size_t n_parts;
	int rc = -ENODEV;

	ASSERT_RTNL();

	efx->mtd_ops = &falcon_mtd_ops;

	/* Allocate space for maximum number of partitions */
	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;
	n_parts = 0;

	spi = &nic_data->spi_flash;
	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		parts[n_parts].falcon.spi = spi;
		parts[n_parts].falcon.offset = FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].dev_type_name = "flash";
		parts[n_parts].type_name = "sfc_flash_bootrom";
		parts[n_parts].mtd.type = MTD_NORFLASH;
		parts[n_parts].mtd.flags = MTD_CAP_NORFLASH;
		parts[n_parts].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	spi = &nic_data->spi_eeprom;
	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
		parts[n_parts].falcon.spi = spi;
		parts[n_parts].falcon.offset = FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].dev_type_name = "EEPROM";
		parts[n_parts].type_name = "sfc_bootconfig";
		parts[n_parts].mtd.type = MTD_RAM;
		parts[n_parts].mtd.flags = MTD_CAP_RAM;
		parts[n_parts].mtd.size =
			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
			FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	rc = efx_mtd_add(efx, parts, n_parts);
	if (rc)
		kfree(parts);
	return rc;
}
...@@ -461,8 +426,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
			  size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
...@@ -485,8 +449,7 @@ static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->mtd.erasesize;
...@@ -517,8 +480,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
...@@ -548,8 +510,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
static int siena_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->mcdi.updating) {
...@@ -589,11 +550,9 @@ static const struct siena_nvram_type_info siena_nvram_types[] = {
};

static int siena_mtd_probe_partition(struct efx_nic *efx,
				     struct efx_mtd_partition *part,
				     unsigned int type)
{
	const struct siena_nvram_type_info *info;
	size_t size, erase_size;
	bool protected;
...@@ -615,6 +574,7 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
		return -ENODEV; /* hide it */

	part->mcdi.nvram_type = type;
	part->dev_type_name = "Siena NVRAM manager";
	part->type_name = info->name;

	part->mtd.type = MTD_NORFLASH;
...@@ -626,55 +586,54 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
}

static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
				     struct efx_mtd_partition *parts,
				     size_t n_parts)
{
	uint16_t fw_subtype_list[
		MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
	size_t i;
	int rc;

	rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
	if (rc)
		return rc;

	for (i = 0; i < n_parts; i++)
		parts[i].mcdi.fw_subtype =
			fw_subtype_list[parts[i].mcdi.nvram_type];

	return 0;
}
static int siena_mtd_probe(struct efx_nic *efx)
{
	struct efx_mtd_partition *parts;
	u32 nvram_types;
	unsigned int type;
	size_t n_parts;
	int rc;

	ASSERT_RTNL();

	efx->mtd_ops = &siena_mtd_ops;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		return rc;

	parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	type = 0;
	n_parts = 0;

	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = siena_mtd_probe_partition(efx, &parts[n_parts],
						       type);
			if (rc == 0)
				n_parts++;
			else if (rc != -ENODEV)
				goto fail;
		}
...@@ -682,14 +641,14 @@ static int siena_mtd_probe(struct efx_nic *efx)
		nvram_types >>= 1;
	}

	rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
	if (rc)
		goto fail;

	rc = efx_mtd_add(efx, parts, n_parts);

fail:
	if (rc)
		kfree(parts);
	return rc;
}
...@@ -30,6 +30,7 @@
#include "enum.h"
#include "bitfield.h"
#include "filter.h"
/**************************************************************************
 *
...@@ -356,6 +357,7 @@ enum efx_rx_alloc_method {
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @type: Channel type definition
 * @eventq_init: Event queue initialised flag
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in hardware ticks)
...@@ -387,6 +389,7 @@ struct efx_channel {
	struct efx_nic *efx;
	int channel;
	const struct efx_channel_type *type;
	bool eventq_init;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
...@@ -674,7 +677,6 @@ union efx_multicast_hash {
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};

struct efx_filter_state;
struct efx_vf;
struct vfdi_status;
...@@ -751,8 +753,10 @@ struct vfdi_status;
 * @link_advertising: Autonegotiation advertising flags
 * @link_state: Current state of the link
 * @n_link_state_changes: Number of times the link has changed state
 * @unicast_filter: Flag for Falcon-arch simple unicast filter.
 *	Protected by @mac_lock.
 * @multicast_hash: Multicast hash table for Falcon-arch.
 *	Protected by @mac_lock.
 * @wanted_fc: Wanted flow control flags
 * @fc_disable: When non-zero flow control is disabled. Typically used to
 *	ensure that network back pressure doesn't delay dma queue flushes.
...@@ -761,6 +765,11 @@ struct vfdi_status;
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 * @filter_lock: Filter table lock
 * @filter_state: Architecture-dependent filter table state
 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
 *	indexed by filter ID
 * @rps_expire_index: Next index to check for expiry in @rps_flow_id
 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
 *	Decremented when the efx_flush_rx_queue() is called.
...@@ -832,6 +841,8 @@ struct efx_nic {
	unsigned rx_dc_base;
	unsigned sram_lim_qw;
	unsigned next_buffer_table;

	unsigned int max_channels;
	unsigned n_channels;
	unsigned n_rx_channels;
	unsigned rss_spread;
...@@ -857,6 +868,7 @@ struct efx_nic {
	struct delayed_work selftest_work;

#ifdef CONFIG_SFC_MTD
	const struct efx_mtd_ops *mtd_ops;
	struct list_head mtd_list;
#endif
...@@ -883,7 +895,7 @@ struct efx_nic {
	struct efx_link_state link_state;
	unsigned int n_link_state_changes;

	bool unicast_filter;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;
	unsigned fc_disable;
...@@ -894,7 +906,12 @@ struct efx_nic {
	void *loopback_selftest;

	spinlock_t filter_lock;
	void *filter_state;
#ifdef CONFIG_RFS_ACCEL
	u32 *rps_flow_id;
	unsigned int rps_expire_index;
#endif

	atomic_t drain_pending;
	atomic_t rxq_flush_pending;
...@@ -939,6 +956,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
/**
 * struct efx_nic_type - Efx device type definition
 * @mem_map_size: Get memory BAR mapped size
 * @probe: Probe the controller
 * @remove: Free resources allocated by probe()
 * @init: Initialise the controller
...@@ -1011,8 +1029,25 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
 * @ev_process: Process events for a queue, up to the given NAPI quota
 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
 * @ev_test_generate: Generate a test event
* @filter_table_probe: Probe filter capabilities and set up filter software state
* @filter_table_restore: Restore filters removed from hardware
* @filter_table_remove: Remove filters from hardware and tear down software state
* @filter_update_rx_scatter: Update filters after change to rx scatter setting
* @filter_insert: add or replace a filter
* @filter_remove_safe: remove a filter by ID, carefully
* @filter_get_safe: retrieve a filter by ID, carefully
* @filter_clear_rx: remove RX filters by priority
* @filter_count_rx_used: Get the number of filters in use at a given priority
* @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
* @filter_get_rx_ids: Get list of RX filters at a given priority
* @filter_rfs_insert: Add or replace a filter for RFS. This must be
* atomic. The hardware change may be asynchronous but should
* not be delayed for long. It may fail if this can't be done
* atomically.
* @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
* This must check whether the specified table entry is used by RFS
* and that rps_may_expire_flow() returns true for it.
 * @revision: Hardware architecture revision
 * @mem_map_size: Memory BAR mapped size
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
...@@ -1024,14 +1059,13 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_init_mode.
 * @phys_addr_channels: Number of channels with physically addressed
 *	descriptors
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 * @mcdi_max_ver: Maximum MCDI version supported
 */
struct efx_nic_type {
	unsigned int (*mem_map_size)(struct efx_nic *efx);
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
...@@ -1090,9 +1124,34 @@ struct efx_nic_type {
	int (*ev_process)(struct efx_channel *channel, int quota);
	void (*ev_read_ack)(struct efx_channel *channel);
	void (*ev_test_generate)(struct efx_channel *channel);
int (*filter_table_probe)(struct efx_nic *efx);
void (*filter_table_restore)(struct efx_nic *efx);
void (*filter_table_remove)(struct efx_nic *efx);
void (*filter_update_rx_scatter)(struct efx_nic *efx);
s32 (*filter_insert)(struct efx_nic *efx,
struct efx_filter_spec *spec, bool replace);
int (*filter_remove_safe)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id);
int (*filter_get_safe)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
void (*filter_clear_rx)(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 (*filter_count_rx_used)(struct efx_nic *efx,
enum efx_filter_priority priority);
u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
s32 (*filter_get_rx_ids)(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
s32 (*filter_rfs_insert)(struct efx_nic *efx,
struct efx_filter_spec *spec);
bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
unsigned int index);
#endif
	int revision;
	unsigned int mem_map_size;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
...@@ -1103,10 +1162,10 @@ struct efx_nic_type {
	unsigned int rx_buffer_padding;
	bool can_rx_scatter;
	unsigned int max_interrupt_mode;
	unsigned int phys_addr_channels;
	unsigned int timer_period_max;
	netdev_features_t offload_features;
	int mcdi_max_ver;
	unsigned int max_rx_ip_filters;
};

/**************************************************************************
......
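Since filter handling is now dispatched through the hooks above, generic code is expected to go via efx->type rather than call the Falcon-arch implementations directly. A hedged round-trip sketch (the wrapper name is invented; per the kernel-doc above, @filter_insert returns the new filter ID on success):

/* Illustrative only: insert a filter, then remove it again by ID at
 * the same priority.  Not part of this patch.
 */
static int example_filter_round_trip(struct efx_nic *efx,
				     struct efx_filter_spec *spec)
{
	s32 filter_id = efx->type->filter_insert(efx, spec, false);

	if (filter_id < 0)
		return filter_id;
	return efx->type->filter_remove_safe(efx, spec->priority, filter_id);
}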
...@@ -184,8 +184,8 @@ struct falcon_nic_data {
	bool stats_pending;
	struct timer_list stats_timer;
	u32 *stats_dma_done;
	struct falcon_spi_device spi_flash;
	struct falcon_spi_device spi_eeprom;
	struct mutex spi_lock;
	struct mutex mdio_lock;
	bool xmac_poll_required;
...@@ -404,6 +404,35 @@ extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
extern void efx_farch_ev_read_ack(struct efx_channel *channel);
extern void efx_farch_ev_test_generate(struct efx_channel *channel);
/* Falcon/Siena filter operations */
extern int efx_farch_filter_table_probe(struct efx_nic *efx);
extern void efx_farch_filter_table_restore(struct efx_nic *efx);
extern void efx_farch_filter_table_remove(struct efx_nic *efx);
extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_farch_filter_insert(struct efx_nic *efx,
struct efx_filter_spec *spec, bool replace);
extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id);
extern int efx_farch_filter_get_safe(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 filter_id, struct efx_filter_spec *);
extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
enum efx_filter_priority priority);
extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
enum efx_filter_priority priority,
u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
struct efx_filter_spec *spec);
extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int index);
#endif
extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
extern bool efx_nic_event_present(struct efx_channel *channel);

/* Some statistics are computed as A - B where A and B each increase
......
...@@ -21,6 +21,7 @@
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
...@@ -802,3 +803,96 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
const struct iphdr *ip;
const __be16 *ports;
int nhoff;
int rc;
nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_8021Q)) {
EFX_BUG_ON_PARANOID(skb_headlen(skb) <
nhoff + sizeof(struct vlan_hdr));
		if (((const struct vlan_hdr *)(skb->data + nhoff))->
		    h_vlan_encapsulated_proto != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
/* This is IP over 802.1q VLAN. We can't filter on the
* IP 5-tuple and the vlan together, so just strip the
* vlan header and filter on the IP part.
*/
nhoff += sizeof(struct vlan_hdr);
} else if (skb->protocol != htons(ETH_P_IP)) {
return -EPROTONOSUPPORT;
}
/* RFS must validate the IP header length before calling us */
EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
ip = (const struct iphdr *)(skb->data + nhoff);
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
ip->daddr, ports[1], ip->saddr, ports[0]);
if (rc)
return rc;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
efx->rps_flow_id[rc] = flow_id;
channel = efx_get_channel(efx, skb_get_rx_queue(skb));
++channel->rfs_filters_added;
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
&ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
rxq_index, flow_id, rc);
return rc;
}
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
unsigned int index, size;
u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
flow_id = efx->rps_flow_id[index];
if (expire_one(efx, flow_id, index))
netif_info(efx, rx_status, efx->net_dev,
"expired filter %d [flow %u]\n",
index, flow_id);
if (++index == size)
index = 0;
}
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
return true;
}
#endif /* CONFIG_RFS_ACCEL */
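For context, a sketch of how __efx_filter_rfs_expire() is meant to be driven (the trigger condition and quota are assumptions, not part of this hunk): it is called periodically from the NAPI poll path with a small quota, and returns false if another CPU already holds @filter_lock so the scan is simply skipped that time.

/* Hypothetical caller: bound per-poll expiry work to 100 table entries,
 * triggered once enough RFS filters have accumulated on this channel.
 */
static void example_expire_from_poll(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	if (channel->rfs_filters_added >= 60 &&
	    __efx_filter_rfs_expire(efx, 100))
		channel->rfs_filters_added -= 60;
}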
...@@ -187,6 +187,12 @@ static void siena_dimension_resources(struct efx_nic *efx)
	efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
}
static unsigned int siena_mem_map_size(struct efx_nic *efx)
{
return FR_CZ_MC_TREG_SMEM +
FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
}
static int siena_probe_nic(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data;
...@@ -207,6 +213,8 @@ static int siena_probe_nic(struct efx_nic *efx)
		goto fail1;
	}

	efx->max_channels = EFX_MAX_CHANNELS;

	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
	efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
...@@ -495,6 +503,8 @@ static int siena_mac_reconfigure(struct efx_nic *efx)
			    MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
			    sizeof(efx->multicast_hash));

	efx_farch_filter_sync_rx_mode(efx);

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	rc = efx_mcdi_set_mac(efx);
...@@ -670,6 +680,7 @@ static int siena_mcdi_poll_reboot(struct efx_nic *efx)
 */

const struct efx_nic_type siena_a0_nic_type = {
	.mem_map_size = siena_mem_map_size,
	.probe = siena_probe_nic,
	.remove = siena_remove_nic,
	.init = siena_init_nic,
...@@ -727,10 +738,23 @@ const struct efx_nic_type siena_a0_nic_type = {
	.ev_process = efx_farch_ev_process,
	.ev_read_ack = efx_farch_ev_read_ack,
	.ev_test_generate = efx_farch_ev_test_generate,
.filter_table_probe = efx_farch_filter_table_probe,
.filter_table_restore = efx_farch_filter_table_restore,
.filter_table_remove = efx_farch_filter_table_remove,
.filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
.filter_insert = efx_farch_filter_insert,
.filter_remove_safe = efx_farch_filter_remove_safe,
.filter_get_safe = efx_farch_filter_get_safe,
.filter_clear_rx = efx_farch_filter_clear_rx,
.filter_count_rx_used = efx_farch_filter_count_rx_used,
.filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
.filter_get_rx_ids = efx_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
.filter_rfs_insert = efx_farch_filter_rfs_insert,
.filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
#endif
	.revision = EFX_REV_SIENA_A0,
	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
...@@ -741,11 +765,9 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -741,11 +765,9 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_buffer_padding = 0, .rx_buffer_padding = 0,
.can_rx_scatter = true, .can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX, .max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
* channels */
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE), NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 1, .mcdi_max_ver = 1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
}; };
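The filter entries added to this table are what let the generic code dispatch through efx->type (as in the filter_update_rx_scatter call converted earlier in this series) instead of calling efx_farch_* directly. A hedged, self-contained sketch of that method-table dispatch, using hypothetical stand-in names (fake_efx, fake_nic_type) rather than the driver's types:

#include <stdio.h>

struct fake_efx;

/* subset of the method table above; the names here are stand-ins */
struct fake_nic_type {
	int (*filter_insert)(struct fake_efx *efx, int spec, int replace);
	void (*filter_update_rx_scatter)(struct fake_efx *efx);
};

struct fake_efx {
	const struct fake_nic_type *type;
	int rx_scatter;
};

static int fake_farch_filter_insert(struct fake_efx *efx, int spec, int replace)
{
	(void)efx;
	(void)replace;
	printf("insert filter spec %d\n", spec);
	return spec; /* filter ID on success */
}

static void fake_farch_update_rx_scatter(struct fake_efx *efx)
{
	printf("rx_scatter now %d\n", efx->rx_scatter);
}

static const struct fake_nic_type fake_siena_type = {
	.filter_insert             = fake_farch_filter_insert,
	.filter_update_rx_scatter  = fake_farch_update_rx_scatter,
};

int main(void)
{
	struct fake_efx efx = { .type = &fake_siena_type, .rx_scatter = 1 };

	/* generic code stays hardware-agnostic: dispatch via the table */
	efx.type->filter_insert(&efx, 42, 0);
	efx.type->filter_update_rx_scatter(&efx);
	return 0;
}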
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
#define SPI_STATUS_NRDY 0x01 /* Device busy flag */ #define SPI_STATUS_NRDY 0x01 /* Device busy flag */
/** /**
* struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
* @device_id: Controller's id for the device * @device_id: Controller's id for the device
* @size: Size (in bytes) * @size: Size (in bytes)
* @addr_len: Number of address bytes in read/write commands * @addr_len: Number of address bytes in read/write commands
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
* @block_size: Write block size (in bytes). * @block_size: Write block size (in bytes).
* Write commands are limited to blocks with this size and alignment. * Write commands are limited to blocks with this size and alignment.
*/ */
struct efx_spi_device { struct falcon_spi_device {
int device_id; int device_id;
unsigned int size; unsigned int size;
unsigned int addr_len; unsigned int addr_len;
...@@ -61,21 +61,21 @@ struct efx_spi_device { ...@@ -61,21 +61,21 @@ struct efx_spi_device {
unsigned int block_size; unsigned int block_size;
}; };
static inline bool efx_spi_present(const struct efx_spi_device *spi) static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{ {
return spi->size != 0; return spi->size != 0;
} }
int falcon_spi_cmd(struct efx_nic *efx, int falcon_spi_cmd(struct efx_nic *efx,
const struct efx_spi_device *spi, unsigned int command, const struct falcon_spi_device *spi, unsigned int command,
int address, const void *in, void *out, size_t len); int address, const void *in, void *out, size_t len);
int falcon_spi_wait_write(struct efx_nic *efx, int falcon_spi_wait_write(struct efx_nic *efx,
const struct efx_spi_device *spi); const struct falcon_spi_device *spi);
int falcon_spi_read(struct efx_nic *efx, int falcon_spi_read(struct efx_nic *efx,
const struct efx_spi_device *spi, loff_t start, const struct falcon_spi_device *spi, loff_t start,
size_t len, size_t *retlen, u8 *buffer); size_t len, size_t *retlen, u8 *buffer);
int falcon_spi_write(struct efx_nic *efx, int falcon_spi_write(struct efx_nic *efx,
const struct efx_spi_device *spi, loff_t start, const struct falcon_spi_device *spi, loff_t start,
size_t len, size_t *retlen, const u8 *buffer); size_t len, size_t *retlen, const u8 *buffer);
/* /*
...@@ -93,7 +93,7 @@ int falcon_spi_write(struct efx_nic *efx, ...@@ -93,7 +93,7 @@ int falcon_spi_write(struct efx_nic *efx,
*/ */
#define FALCON_NVCONFIG_END 0x400U #define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U #define FALCON_FLASH_BOOTCODE_START 0x8000U
#define EFX_EEPROM_BOOTCONFIG_START 0x800U #define FALCON_EEPROM_BOOTCONFIG_START 0x800U
#define EFX_EEPROM_BOOTCONFIG_END 0x1800U #define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
#endif /* EFX_SPI_H */ #endif /* EFX_SPI_H */
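The rename above scopes the SPI structure and helpers to Falcon, but the presence-check idiom is unchanged: a zeroed falcon_spi_device (size == 0) doubles as "no device configured", so no separate flag is needed. A small sketch of that idiom under stand-in types (spi_dev here is hypothetical, not the header's struct):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for struct falcon_spi_device; the real fields are listed above */
struct spi_dev {
	int device_id;
	unsigned int size;       /* 0 => no device configured */
	unsigned int block_size; /* write block size and alignment */
};

/* mirrors falcon_spi_present(): nonzero size means a device was probed */
static bool spi_present(const struct spi_dev *spi)
{
	return spi->size != 0;
}

int main(void)
{
	struct spi_dev flash = { .device_id = 1, .size = 0x20000, .block_size = 256 };
	struct spi_dev eeprom = { 0 }; /* never probed: reads as absent */

	printf("flash %s, eeprom %s\n",
	       spi_present(&flash) ? "present" : "absent",
	       spi_present(&eeprom) ? "present" : "absent");
	return 0;
}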
...@@ -15,25 +15,15 @@ ...@@ -15,25 +15,15 @@
* Bug numbers are from Solarflare's Bugzilla. * Bug numbers are from Solarflare's Bugzilla.
*/ */
#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) #define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) #define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1 #define EFX_WORKAROUND_10G(efx) 1
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
/* RX PCIe double split performance issue */
#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */ /* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
* or a PCIe error (bug 11028) */
#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
/* Truncated IPv4 packets can confuse the TX packet parser */ /* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB #define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
/* Legacy ISR read can return zero once */
#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */ /* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
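The macros that survive this cleanup are predicates keyed to NIC revision; each bug-numbered macro aliases whichever predicate scopes it, so callers can simply write if (EFX_WORKAROUND_17213(efx)). A minimal stand-alone sketch of the pattern, with all names being stand-ins rather than the driver's own:

#include <stdio.h>

/* stand-ins for efx_nic_rev() and the EFX_REV_* constants */
enum rev { REV_FALCON_A1, REV_FALCON_B0, REV_SIENA_A0 };

struct nic { enum rev rev; };

static enum rev nic_rev(const struct nic *n) { return n->rev; }

/* same shape as the macros above: a predicate on the NIC revision */
#define WORKAROUND_FALCON_AB(n) (nic_rev(n) <= REV_FALCON_B0)
#define WORKAROUND_SIENA(n)     (nic_rev(n) == REV_SIENA_A0)

/* bug numbers alias the predicate that scopes them */
#define WORKAROUND_15592 WORKAROUND_FALCON_AB
#define WORKAROUND_17213 WORKAROUND_SIENA

int main(void)
{
	struct nic siena = { REV_SIENA_A0 };

	if (WORKAROUND_17213(&siena))
		printf("apply interrupt-storm workaround\n");
	if (!WORKAROUND_15592(&siena))
		printf("TX parser workaround not needed on Siena\n");
	return 0;
}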
......