Commit 8dcdc952 authored by Jakub Kicinski

Merge branch 'sfc-ARFS-expiry-improvements'

Edward Cree says:

====================
A series of changes to how we check filters for expiry, manage how much
 of that work to do & when, etc.
Prompted by some pathological behaviour under heavy load, which was
Reported-by: David Ahern <dahern@digitalocean.com>
====================
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
parents c4f2cbd3 6fbc05e5
...@@ -4202,11 +4202,15 @@ static int efx_ef10_filter_push(struct efx_nic *efx, ...@@ -4202,11 +4202,15 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
{ {
MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
size_t outlen;
int rc; int rc;
efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing); efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL); outbuf, sizeof(outbuf), &outlen);
if (rc && spec->priority != EFX_FILTER_PRI_HINT)
efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
outbuf, outlen, rc);
if (rc == 0) if (rc == 0)
*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
if (rc == -ENOSPC) if (rc == -ENOSPC)
......
...@@ -355,7 +355,7 @@ static int efx_poll(struct napi_struct *napi, int budget) ...@@ -355,7 +355,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
/* Perhaps expire some ARFS filters */ /* Perhaps expire some ARFS filters */
schedule_work(&channel->filter_work); mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif #endif
/* There is no race here; although napi_disable() will /* There is no race here; although napi_disable() will
...@@ -487,7 +487,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) ...@@ -487,7 +487,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
} }
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
INIT_WORK(&channel->filter_work, efx_filter_rfs_expire); INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif #endif
rx_queue = &channel->rx_queue; rx_queue = &channel->rx_queue;
...@@ -533,7 +533,7 @@ efx_copy_channel(const struct efx_channel *old_channel) ...@@ -533,7 +533,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
INIT_WORK(&channel->filter_work, efx_filter_rfs_expire); INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif #endif
return channel; return channel;
...@@ -1969,6 +1969,8 @@ static int efx_probe_filters(struct efx_nic *efx) ...@@ -1969,6 +1969,8 @@ static int efx_probe_filters(struct efx_nic *efx)
++i) ++i)
channel->rps_flow_id[i] = channel->rps_flow_id[i] =
RPS_FLOW_ID_INVALID; RPS_FLOW_ID_INVALID;
channel->rfs_expire_index = 0;
channel->rfs_filter_count = 0;
} }
if (!success) { if (!success) {
...@@ -1978,8 +1980,6 @@ static int efx_probe_filters(struct efx_nic *efx) ...@@ -1978,8 +1980,6 @@ static int efx_probe_filters(struct efx_nic *efx)
rc = -ENOMEM; rc = -ENOMEM;
goto out_unlock; goto out_unlock;
} }
efx->rps_expire_index = efx->rps_expire_channel = 0;
} }
#endif #endif
out_unlock: out_unlock:
...@@ -1993,8 +1993,10 @@ static void efx_remove_filters(struct efx_nic *efx) ...@@ -1993,8 +1993,10 @@ static void efx_remove_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
struct efx_channel *channel; struct efx_channel *channel;
efx_for_each_channel(channel, efx) efx_for_each_channel(channel, efx) {
cancel_delayed_work_sync(&channel->filter_work);
kfree(channel->rps_flow_id); kfree(channel->rps_flow_id);
}
#endif #endif
down_write(&efx->filter_sem); down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx); efx->type->filter_table_remove(efx);
......
...@@ -166,15 +166,20 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, ...@@ -166,15 +166,20 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id); u16 rxq_index, u32 flow_id);
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
static inline void efx_filter_rfs_expire(struct work_struct *data) static inline void efx_filter_rfs_expire(struct work_struct *data)
{ {
struct efx_channel *channel = container_of(data, struct efx_channel, struct delayed_work *dwork = to_delayed_work(data);
filter_work); struct efx_channel *channel;
unsigned int time, quota;
if (channel->rfs_filters_added >= 60 &&
__efx_filter_rfs_expire(channel->efx, 100)) channel = container_of(dwork, struct efx_channel, filter_work);
channel->rfs_filters_added -= 60; time = jiffies - channel->rfs_last_expiry;
quota = channel->rfs_filter_count * time / (30 * HZ);
if (quota > 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
channel->rfs_last_expiry += time;
/* Ensure we do more work eventually even if NAPI poll is not happening */
schedule_delayed_work(dwork, 30 * HZ);
} }
#define efx_filter_rfs_enabled() 1 #define efx_filter_rfs_enabled() 1
#else #else
......
...@@ -56,6 +56,9 @@ static u64 efx_get_atomic_stat(void *field) ...@@ -56,6 +56,9 @@ static u64 efx_get_atomic_stat(void *field)
#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \
EFX_ETHTOOL_STAT(field, channel, n_##field, \ EFX_ETHTOOL_STAT(field, channel, n_##field, \
unsigned int, efx_get_uint_stat) unsigned int, efx_get_uint_stat)
#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field) \
EFX_ETHTOOL_STAT(field, channel, field, \
unsigned int, efx_get_uint_stat)
#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ #define EFX_ETHTOOL_UINT_TXQ_STAT(field) \
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
...@@ -87,6 +90,9 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = { ...@@ -87,6 +90,9 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
}; };
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc) #define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
......
...@@ -439,6 +439,13 @@ enum efx_sync_events_state { ...@@ -439,6 +439,13 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision * @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score * @irq_mod_score: IRQ moderation score
* @rfs_filter_count: number of accelerated RFS filters currently in place;
* equals the count of @rps_flow_id slots filled
* @rfs_last_expiry: value of jiffies last time some accelerated RFS filters
* were checked for expiry
* @rfs_expire_index: next accelerated RFS filter ID to check for expiry
* @n_rfs_succeeded: number of successful accelerated RFS filter insertions
* @n_rfs_failed: number of failed accelerated RFS filter insertions
* @filter_work: Work item for efx_filter_rfs_expire() * @filter_work: Work item for efx_filter_rfs_expire()
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID * indexed by filter ID
...@@ -489,8 +496,12 @@ struct efx_channel { ...@@ -489,8 +496,12 @@ struct efx_channel {
unsigned int irq_count; unsigned int irq_count;
unsigned int irq_mod_score; unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added; unsigned int rfs_filter_count;
struct work_struct filter_work; unsigned int rfs_last_expiry;
unsigned int rfs_expire_index;
unsigned int n_rfs_succeeded;
unsigned int n_rfs_failed;
struct delayed_work filter_work;
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF #define RPS_FLOW_ID_INVALID 0xFFFFFFFF
u32 *rps_flow_id; u32 *rps_flow_id;
#endif #endif
...@@ -923,9 +934,6 @@ struct efx_async_filter_insertion { ...@@ -923,9 +934,6 @@ struct efx_async_filter_insertion {
* @filter_sem: Filter table rw_semaphore, protects existence of @filter_state * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
* @filter_state: Architecture-dependent filter table state * @filter_state: Architecture-dependent filter table state
* @rps_mutex: Protects RPS state of all channels * @rps_mutex: Protects RPS state of all channels
* @rps_expire_channel: Next channel to check for expiry
* @rps_expire_index: Next index to check for expiry in
* @rps_expire_channel's @rps_flow_id
* @rps_slot_map: bitmap of in-flight entries in @rps_slot * @rps_slot_map: bitmap of in-flight entries in @rps_slot
* @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
* @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
...@@ -1096,8 +1104,6 @@ struct efx_nic { ...@@ -1096,8 +1104,6 @@ struct efx_nic {
void *filter_state; void *filter_state;
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
struct mutex rps_mutex; struct mutex rps_mutex;
unsigned int rps_expire_channel;
unsigned int rps_expire_index;
unsigned long rps_slot_map; unsigned long rps_slot_map;
struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
spinlock_t rps_hash_lock; spinlock_t rps_hash_lock;
......
...@@ -988,6 +988,7 @@ static void efx_filter_rfs_work(struct work_struct *data) ...@@ -988,6 +988,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
rc = efx->type->filter_insert(efx, &req->spec, true); rc = efx->type->filter_insert(efx, &req->spec, true);
if (rc >= 0) if (rc >= 0)
/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
rc %= efx->type->max_rx_ip_filters; rc %= efx->type->max_rx_ip_filters;
if (efx->rps_hash_table) { if (efx->rps_hash_table) {
spin_lock_bh(&efx->rps_hash_lock); spin_lock_bh(&efx->rps_hash_lock);
...@@ -1012,8 +1013,9 @@ static void efx_filter_rfs_work(struct work_struct *data) ...@@ -1012,8 +1013,9 @@ static void efx_filter_rfs_work(struct work_struct *data)
* later. * later.
*/ */
mutex_lock(&efx->rps_mutex); mutex_lock(&efx->rps_mutex);
if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
channel->rfs_filter_count++;
channel->rps_flow_id[rc] = req->flow_id; channel->rps_flow_id[rc] = req->flow_id;
++channel->rfs_filters_added;
mutex_unlock(&efx->rps_mutex); mutex_unlock(&efx->rps_mutex);
if (req->spec.ether_type == htons(ETH_P_IP)) if (req->spec.ether_type == htons(ETH_P_IP))
...@@ -1030,6 +1032,28 @@ static void efx_filter_rfs_work(struct work_struct *data) ...@@ -1030,6 +1032,28 @@ static void efx_filter_rfs_work(struct work_struct *data)
req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port), req->spec.loc_host, ntohs(req->spec.loc_port),
req->rxq_index, req->flow_id, rc, arfs_id); req->rxq_index, req->flow_id, rc, arfs_id);
channel->n_rfs_succeeded++;
} else {
if (req->spec.ether_type == htons(ETH_P_IP))
netif_dbg(efx, rx_status, efx->net_dev,
"failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
(req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
req->rxq_index, req->flow_id, rc, arfs_id);
else
netif_dbg(efx, rx_status, efx->net_dev,
"failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
(req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
req->rxq_index, req->flow_id, rc, arfs_id);
channel->n_rfs_failed++;
/* We're overloading the NIC's filter tables, so let's do a
* chunk of extra expiry work.
*/
__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
100u));
} }
/* Release references */ /* Release references */
...@@ -1139,38 +1163,44 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, ...@@ -1139,38 +1163,44 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
return rc; return rc;
} }
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{ {
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
unsigned int channel_idx, index, size; struct efx_nic *efx = channel->efx;
unsigned int index, size, start;
u32 flow_id; u32 flow_id;
if (!mutex_trylock(&efx->rps_mutex)) if (!mutex_trylock(&efx->rps_mutex))
return false; return false;
expire_one = efx->type->filter_rfs_expire_one; expire_one = efx->type->filter_rfs_expire_one;
channel_idx = efx->rps_expire_channel; index = channel->rfs_expire_index;
index = efx->rps_expire_index; start = index;
size = efx->type->max_rx_ip_filters; size = efx->type->max_rx_ip_filters;
while (quota--) { while (quota) {
struct efx_channel *channel = efx_get_channel(efx, channel_idx);
flow_id = channel->rps_flow_id[index]; flow_id = channel->rps_flow_id[index];
if (flow_id != RPS_FLOW_ID_INVALID && if (flow_id != RPS_FLOW_ID_INVALID) {
expire_one(efx, flow_id, index)) { quota--;
netif_info(efx, rx_status, efx->net_dev, if (expire_one(efx, flow_id, index)) {
"expired filter %d [queue %u flow %u]\n", netif_info(efx, rx_status, efx->net_dev,
index, channel_idx, flow_id); "expired filter %d [channel %u flow %u]\n",
channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; index, channel->channel, flow_id);
channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
channel->rfs_filter_count--;
}
} }
if (++index == size) { if (++index == size)
if (++channel_idx == efx->n_channels)
channel_idx = 0;
index = 0; index = 0;
} /* If we were called with a quota that exceeds the total number
* of filters in the table (which shouldn't happen, but could
* if two callers race), ensure that we don't loop forever -
* stop when we've examined every row of the table.
*/
if (index == start)
break;
} }
efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
channel->rfs_expire_index = index;
mutex_unlock(&efx->rps_mutex); mutex_unlock(&efx->rps_mutex);
return true; return true;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment